i40e: fix VLAN filtering
[dpdk.git] drivers/net/i40e/i40e_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <assert.h>
43
44 #include <rte_string_fns.h>
45 #include <rte_pci.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
48 #include <rte_memzone.h>
49 #include <rte_malloc.h>
50 #include <rte_memcpy.h>
51 #include <rte_alarm.h>
52 #include <rte_dev.h>
53 #include <rte_eth_ctrl.h>
54
55 #include "i40e_logs.h"
56 #include "base/i40e_prototype.h"
57 #include "base/i40e_adminq_cmd.h"
58 #include "base/i40e_type.h"
59 #include "base/i40e_register.h"
60 #include "base/i40e_dcb.h"
61 #include "i40e_ethdev.h"
62 #include "i40e_rxtx.h"
63 #include "i40e_pf.h"
64
65 /* Maximum number of MAC addresses */
66 #define I40E_NUM_MACADDR_MAX       64
67 #define I40E_CLEAR_PXE_WAIT_MS     200
68
69 /* Maximum number of capability elements */
70 #define I40E_MAX_CAP_ELE_NUM       128
71
72 /* Wait count and interval */
73 #define I40E_CHK_Q_ENA_COUNT       1000
74 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
75
76 /* Maximum number of VSIs */
77 #define I40E_MAX_NUM_VSIS          (384UL)
78
79 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
80
81 /* Flow control default timer */
82 #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
83
84 /* Flow control default high water */
85 #define I40E_DEFAULT_HIGH_WATER (0x1C40/1024)
86
87 /* Flow control default low water */
88 #define I40E_DEFAULT_LOW_WATER  (0x1A40/1024)
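/*
 * Illustrative note, not in the original source: the high/low water values
 * above are expressed in kilobyte units, hence the division by 1024. For
 * example, 0x1C40 = 7232 bytes, so the default high water programmed is
 * 7232 / 1024 = 7 (KB units), and 0x1A40 = 6720 bytes gives a low water of 6.
 * I40E_KILOSHIFT below is presumably the matching shift (x << 10) for
 * converting such KB values back to bytes.
 */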
89
90 /* Flow control enable fwd bit */
91 #define I40E_PRTMAC_FWD_CTRL   0x00000001
92
93 /* Receive Packet Buffer size */
94 #define I40E_RXPBSIZE (968 * 1024)
95
96 /* Kilobytes shift */
97 #define I40E_KILOSHIFT 10
98
99 /* Receive average packet size in bytes */
100 #define I40E_PACKET_AVERAGE_SIZE 128
101
102 /* Mask of PF interrupt causes */
103 #define I40E_PFINT_ICR0_ENA_MASK ( \
104                 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
105                 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
106                 I40E_PFINT_ICR0_ENA_GRST_MASK | \
107                 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
108                 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
109                 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
110                 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
111                 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
112                 I40E_PFINT_ICR0_ENA_VFLR_MASK | \
113                 I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
114
115 #define I40E_FLOW_TYPES ( \
116         (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
117         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
118         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
119         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
120         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
121         (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
122         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
123         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
124         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
125         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
126         (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
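/*
 * A minimal usage sketch, not from the original file: I40E_FLOW_TYPES is a
 * bitmap indexed by the RTE_ETH_FLOW_* values, so a support check for a
 * given flow type looks roughly like:
 *
 *     if (!(I40E_FLOW_TYPES & (1UL << flow_type)))
 *             return -EINVAL;   // flow type not handled by i40e
 */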
127
128 /* Additional timesync values. */
129 #define I40E_PTP_40GB_INCVAL     0x0199999999ULL
130 #define I40E_PTP_10GB_INCVAL     0x0333333333ULL
131 #define I40E_PTP_1GB_INCVAL      0x2000000000ULL
132 #define I40E_PRTTSYN_TSYNENA     0x80000000
133 #define I40E_PRTTSYN_TSYNTYPE    0x0e000000
134 #define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
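/*
 * Illustrative note, not in the original source: the INCVAL constants above
 * set the per-tick increment of the 1588 cycle counter, scaled by link speed
 * so the timesync clock advances at the same rate regardless of speed; the
 * 10GB value is roughly twice the 40GB value, and the 1GB value roughly ten
 * times the 10GB value.
 */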
135
136 #define I40E_MAX_PERCENT            100
137 #define I40E_DEFAULT_DCB_APP_NUM    1
138 #define I40E_DEFAULT_DCB_APP_PRIO   3
139
140 #define I40E_PRTQF_FD_INSET(_i, _j)  (0x00250000 + ((_i) * 64 + (_j) * 32))
141 #define I40E_GLQF_FD_MSK(_i, _j)     (0x00267200 + ((_i) * 4 + (_j) * 8))
142 #define I40E_GLQF_FD_MSK_FIELD       0x0000FFFF
143 #define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8))
144 #define I40E_GLQF_HASH_MSK(_i, _j)   (0x00267A00 + ((_i) * 4 + (_j) * 8))
145 #define I40E_GLQF_HASH_MSK_FIELD      0x0000FFFF
146
147 #define I40E_INSET_NONE            0x0000000000000000ULL
148
149 /* bit 0 ~ bit 7 */
150 #define I40E_INSET_DMAC            0x0000000000000001ULL
151 #define I40E_INSET_SMAC            0x0000000000000002ULL
152 #define I40E_INSET_VLAN_OUTER      0x0000000000000004ULL
153 #define I40E_INSET_VLAN_INNER      0x0000000000000008ULL
154 #define I40E_INSET_VLAN_TUNNEL     0x0000000000000010ULL
155
156 /* bit 8 ~ bit 15 */
157 #define I40E_INSET_IPV4_SRC        0x0000000000000100ULL
158 #define I40E_INSET_IPV4_DST        0x0000000000000200ULL
159 #define I40E_INSET_IPV6_SRC        0x0000000000000400ULL
160 #define I40E_INSET_IPV6_DST        0x0000000000000800ULL
161 #define I40E_INSET_SRC_PORT        0x0000000000001000ULL
162 #define I40E_INSET_DST_PORT        0x0000000000002000ULL
163 #define I40E_INSET_SCTP_VT         0x0000000000004000ULL
164
165 /* bit 16 ~ bit 31 */
166 #define I40E_INSET_IPV4_TOS        0x0000000000010000ULL
167 #define I40E_INSET_IPV4_PROTO      0x0000000000020000ULL
168 #define I40E_INSET_IPV4_TTL        0x0000000000040000ULL
169 #define I40E_INSET_IPV6_TC         0x0000000000080000ULL
170 #define I40E_INSET_IPV6_FLOW       0x0000000000100000ULL
171 #define I40E_INSET_IPV6_NEXT_HDR   0x0000000000200000ULL
172 #define I40E_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
173 #define I40E_INSET_TCP_FLAGS       0x0000000000800000ULL
174
175 /* bit 32 ~ bit 47, tunnel fields */
176 #define I40E_INSET_TUNNEL_IPV4_DST       0x0000000100000000ULL
177 #define I40E_INSET_TUNNEL_IPV6_DST       0x0000000200000000ULL
178 #define I40E_INSET_TUNNEL_DMAC           0x0000000400000000ULL
179 #define I40E_INSET_TUNNEL_SRC_PORT       0x0000000800000000ULL
180 #define I40E_INSET_TUNNEL_DST_PORT       0x0000001000000000ULL
181 #define I40E_INSET_TUNNEL_ID             0x0000002000000000ULL
182
183 /* bit 48 ~ bit 55 */
184 #define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
185
186 /* bit 56 ~ bit 63, Flex Payload */
187 #define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
188 #define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
189 #define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
190 #define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
191 #define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
192 #define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
193 #define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
194 #define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
195 #define I40E_INSET_FLEX_PAYLOAD \
196         (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
197         I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
198         I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
199         I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
200
201 /**
202  * Below are the values for writing the un-exposed registers,
203  * as suggested by silicon experts.
204  */
205 /* Destination MAC address */
206 #define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
207 /* Source MAC address */
208 #define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
209 /* VLAN tag in the outer L2 header */
210 #define I40E_REG_INSET_L2_OUTER_VLAN             0x0080000000000000ULL
211 /* VLAN tag in the inner L2 header */
212 #define I40E_REG_INSET_L2_INNER_VLAN             0x0100000000000000ULL
213 /* Source IPv4 address */
214 #define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
215 /* Destination IPv4 address */
216 #define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
217 /* IPv4 Type of Service (TOS) */
218 #define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
219 /* IPv4 Protocol */
220 #define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
221 /* Source IPv6 address */
222 #define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
223 /* Destination IPv6 address */
224 #define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
225 /* IPv6 Traffic Class (TC) */
226 #define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
227 /* IPv6 Next Header */
228 #define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
229 /* Source L4 port */
230 #define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
231 /* Destination L4 port */
232 #define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
233 /* SCTP verification tag */
234 #define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
235 /* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
236 #define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
237 /* Source port of tunneling UDP */
238 #define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
239 /* Destination port of tunneling UDP */
240 #define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
241 /* UDP Tunneling ID, NVGRE/GRE key */
242 #define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
243 /* Last ether type */
244 #define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
245 /* Tunneling outer destination IPv4 address */
246 #define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
247 /* Tunneling outer destination IPv6 address */
248 #define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
249 /* 1st word of flex payload */
250 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
251 /* 2nd word of flex payload */
252 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
253 /* 3rd word of flex payload */
254 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
255 /* 4th word of flex payload */
256 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
257 /* 5th word of flex payload */
258 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
259 /* 6th word of flex payload */
260 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
261 /* 7th word of flex payload */
262 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
263 /* 8th word of flex payload */
264 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
265
266 #define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL
267
268 #define I40E_TRANSLATE_INSET 0
269 #define I40E_TRANSLATE_REG   1
270
271 #define I40E_INSET_IPV4_TOS_MASK      0x0009FF00UL
272 #define I40E_INSET_IPV4_PROTO_MASK    0x000DFF00UL
273 #define I40E_INSET_IPV6_TC_MASK       0x0009F00FUL
274 #define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL
275
276 #define I40E_GL_SWT_L2TAGCTRL(_i)             (0x001C0A70 + ((_i) * 4))
277 #define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
278 #define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK  \
279         I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
280
281 /* PCI offset for querying capability */
282 #define PCI_DEV_CAP_REG            0xA4
283 /* PCI offset for enabling/disabling Extended Tag */
284 #define PCI_DEV_CTRL_REG           0xA8
285 /* Bit mask of Extended Tag capability */
286 #define PCI_DEV_CAP_EXT_TAG_MASK   0x20
287 /* Bit shift of Extended Tag enable/disable */
288 #define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
289 /* Bit mask of Extended Tag enable/disable */
290 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
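/*
 * Illustrative sketch, not from the original file: the capability/control
 * offsets and masks above would be used with the EAL PCI config-space
 * accessors, roughly as follows (assuming the rte_eal_pci_read_config()/
 * rte_eal_pci_write_config() API of this DPDK generation):
 *
 *     uint32_t buf = 0;
 *
 *     rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CAP_REG);
 *     if (buf & PCI_DEV_CAP_EXT_TAG_MASK) {        // capability present
 *             rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf),
 *                                     PCI_DEV_CTRL_REG);
 *             buf |= PCI_DEV_CTRL_EXT_TAG_MASK;    // enable Extended Tag
 *             rte_eal_pci_write_config(pci_dev, &buf, sizeof(buf),
 *                                      PCI_DEV_CTRL_REG);
 *     }
 */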
291
292 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
293 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
294 static int i40e_dev_configure(struct rte_eth_dev *dev);
295 static int i40e_dev_start(struct rte_eth_dev *dev);
296 static void i40e_dev_stop(struct rte_eth_dev *dev);
297 static void i40e_dev_close(struct rte_eth_dev *dev);
298 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
299 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
300 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
301 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
302 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
303 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
304 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
305                                struct rte_eth_stats *stats);
306 static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
307                                struct rte_eth_xstats *xstats, unsigned n);
308 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
309 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
310                                             uint16_t queue_id,
311                                             uint8_t stat_idx,
312                                             uint8_t is_rx);
313 static void i40e_dev_info_get(struct rte_eth_dev *dev,
314                               struct rte_eth_dev_info *dev_info);
315 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
316                                 uint16_t vlan_id,
317                                 int on);
318 static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
319                               enum rte_vlan_type vlan_type,
320                               uint16_t tpid);
321 static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
322 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
323                                       uint16_t queue,
324                                       int on);
325 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
326 static int i40e_dev_led_on(struct rte_eth_dev *dev);
327 static int i40e_dev_led_off(struct rte_eth_dev *dev);
328 static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
329                               struct rte_eth_fc_conf *fc_conf);
330 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
331                               struct rte_eth_fc_conf *fc_conf);
332 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
333                                        struct rte_eth_pfc_conf *pfc_conf);
334 static void i40e_macaddr_add(struct rte_eth_dev *dev,
335                           struct ether_addr *mac_addr,
336                           uint32_t index,
337                           uint32_t pool);
338 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
339 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
340                                     struct rte_eth_rss_reta_entry64 *reta_conf,
341                                     uint16_t reta_size);
342 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
343                                    struct rte_eth_rss_reta_entry64 *reta_conf,
344                                    uint16_t reta_size);
345
346 static int i40e_get_cap(struct i40e_hw *hw);
347 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
348 static int i40e_pf_setup(struct i40e_pf *pf);
349 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
350 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
351 static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
352 static int i40e_dcb_setup(struct rte_eth_dev *dev);
353 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
354                 bool offset_loaded, uint64_t *offset, uint64_t *stat);
355 static void i40e_stat_update_48(struct i40e_hw *hw,
356                                uint32_t hireg,
357                                uint32_t loreg,
358                                bool offset_loaded,
359                                uint64_t *offset,
360                                uint64_t *stat);
361 static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
362 static void i40e_dev_interrupt_handler(
363                 __rte_unused struct rte_intr_handle *handle, void *param);
364 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
365                                 uint32_t base, uint32_t num);
366 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
367 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
368                         uint32_t base);
369 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
370                         uint16_t num);
371 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
372 static int i40e_veb_release(struct i40e_veb *veb);
373 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
374                                                 struct i40e_vsi *vsi);
375 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
376 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
377 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
378                                              struct i40e_macvlan_filter *mv_f,
379                                              int num,
380                                              struct ether_addr *addr);
381 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
382                                              struct i40e_macvlan_filter *mv_f,
383                                              int num,
384                                              uint16_t vlan);
385 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
386 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
387                                     struct rte_eth_rss_conf *rss_conf);
388 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
389                                       struct rte_eth_rss_conf *rss_conf);
390 static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
391                                         struct rte_eth_udp_tunnel *udp_tunnel);
392 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
393                                         struct rte_eth_udp_tunnel *udp_tunnel);
394 static int i40e_ethertype_filter_set(struct i40e_pf *pf,
395                         struct rte_eth_ethertype_filter *filter,
396                         bool add);
397 static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
398                                 enum rte_filter_op filter_op,
399                                 void *arg);
400 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
401                                 enum rte_filter_type filter_type,
402                                 enum rte_filter_op filter_op,
403                                 void *arg);
404 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
405                                   struct rte_eth_dcb_info *dcb_info);
406 static void i40e_configure_registers(struct i40e_hw *hw);
407 static void i40e_hw_init(struct rte_eth_dev *dev);
408 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
409 static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
410                         struct rte_eth_mirror_conf *mirror_conf,
411                         uint8_t sw_id, uint8_t on);
412 static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
413
414 static int i40e_timesync_enable(struct rte_eth_dev *dev);
415 static int i40e_timesync_disable(struct rte_eth_dev *dev);
416 static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
417                                            struct timespec *timestamp,
418                                            uint32_t flags);
419 static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
420                                            struct timespec *timestamp);
421 static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
422
423 static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
424
425 static int i40e_timesync_read_time(struct rte_eth_dev *dev,
426                                    struct timespec *timestamp);
427 static int i40e_timesync_write_time(struct rte_eth_dev *dev,
428                                     const struct timespec *timestamp);
429
430 static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
431                                          uint16_t queue_id);
432 static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
433                                           uint16_t queue_id);
434
435
436 static const struct rte_pci_id pci_id_i40e_map[] = {
437 #define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
438 #include "rte_pci_dev_ids.h"
439 { .vendor_id = 0, /* sentinel */ },
440 };
441
442 static const struct eth_dev_ops i40e_eth_dev_ops = {
443         .dev_configure                = i40e_dev_configure,
444         .dev_start                    = i40e_dev_start,
445         .dev_stop                     = i40e_dev_stop,
446         .dev_close                    = i40e_dev_close,
447         .promiscuous_enable           = i40e_dev_promiscuous_enable,
448         .promiscuous_disable          = i40e_dev_promiscuous_disable,
449         .allmulticast_enable          = i40e_dev_allmulticast_enable,
450         .allmulticast_disable         = i40e_dev_allmulticast_disable,
451         .dev_set_link_up              = i40e_dev_set_link_up,
452         .dev_set_link_down            = i40e_dev_set_link_down,
453         .link_update                  = i40e_dev_link_update,
454         .stats_get                    = i40e_dev_stats_get,
455         .xstats_get                   = i40e_dev_xstats_get,
456         .stats_reset                  = i40e_dev_stats_reset,
457         .xstats_reset                 = i40e_dev_stats_reset,
458         .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
459         .dev_infos_get                = i40e_dev_info_get,
460         .vlan_filter_set              = i40e_vlan_filter_set,
461         .vlan_tpid_set                = i40e_vlan_tpid_set,
462         .vlan_offload_set             = i40e_vlan_offload_set,
463         .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
464         .vlan_pvid_set                = i40e_vlan_pvid_set,
465         .rx_queue_start               = i40e_dev_rx_queue_start,
466         .rx_queue_stop                = i40e_dev_rx_queue_stop,
467         .tx_queue_start               = i40e_dev_tx_queue_start,
468         .tx_queue_stop                = i40e_dev_tx_queue_stop,
469         .rx_queue_setup               = i40e_dev_rx_queue_setup,
470         .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
471         .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
472         .rx_queue_release             = i40e_dev_rx_queue_release,
473         .rx_queue_count               = i40e_dev_rx_queue_count,
474         .rx_descriptor_done           = i40e_dev_rx_descriptor_done,
475         .tx_queue_setup               = i40e_dev_tx_queue_setup,
476         .tx_queue_release             = i40e_dev_tx_queue_release,
477         .dev_led_on                   = i40e_dev_led_on,
478         .dev_led_off                  = i40e_dev_led_off,
479         .flow_ctrl_get                = i40e_flow_ctrl_get,
480         .flow_ctrl_set                = i40e_flow_ctrl_set,
481         .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
482         .mac_addr_add                 = i40e_macaddr_add,
483         .mac_addr_remove              = i40e_macaddr_remove,
484         .reta_update                  = i40e_dev_rss_reta_update,
485         .reta_query                   = i40e_dev_rss_reta_query,
486         .rss_hash_update              = i40e_dev_rss_hash_update,
487         .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
488         .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
489         .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
490         .filter_ctrl                  = i40e_dev_filter_ctrl,
491         .rxq_info_get                 = i40e_rxq_info_get,
492         .txq_info_get                 = i40e_txq_info_get,
493         .mirror_rule_set              = i40e_mirror_rule_set,
494         .mirror_rule_reset            = i40e_mirror_rule_reset,
495         .timesync_enable              = i40e_timesync_enable,
496         .timesync_disable             = i40e_timesync_disable,
497         .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
498         .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
499         .get_dcb_info                 = i40e_dev_get_dcb_info,
500         .timesync_adjust_time         = i40e_timesync_adjust_time,
501         .timesync_read_time           = i40e_timesync_read_time,
502         .timesync_write_time          = i40e_timesync_write_time,
503 };
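/*
 * Note, not in the original source: the ops table above is what the generic
 * ethdev layer dispatches through, e.g. an application call to
 * rte_eth_dev_start(port_id) ends up in i40e_dev_start(), and
 * rte_eth_dev_vlan_filter(port_id, vlan_id, on) ends up in
 * i40e_vlan_filter_set().
 */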
504
505 /* Store statistics names and their offsets in the stats structure */
506 struct rte_i40e_xstats_name_off {
507         char name[RTE_ETH_XSTATS_NAME_SIZE];
508         unsigned offset;
509 };
510
511 static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
512         {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
513         {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
514         {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
515         {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
516         {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
517                 rx_unknown_protocol)},
518         {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
519         {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
520         {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
521         {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
522 };
523
524 #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
525                 sizeof(rte_i40e_stats_strings[0]))
526
527 static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
528         {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
529                 tx_dropped_link_down)},
530         {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
531         {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
532                 illegal_bytes)},
533         {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
534         {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
535                 mac_local_faults)},
536         {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
537                 mac_remote_faults)},
538         {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
539                 rx_length_errors)},
540         {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
541         {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
542         {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
543         {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
544         {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
545         {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
546                 rx_size_127)},
547         {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
548                 rx_size_255)},
549         {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
550                 rx_size_511)},
551         {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
552                 rx_size_1023)},
553         {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
554                 rx_size_1522)},
555         {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
556                 rx_size_big)},
557         {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
558                 rx_undersize)},
559         {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
560                 rx_oversize)},
561         {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
562                 mac_short_packet_dropped)},
563         {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
564                 rx_fragments)},
565         {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
566         {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
567         {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
568                 tx_size_127)},
569         {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
570                 tx_size_255)},
571         {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
572                 tx_size_511)},
573         {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
574                 tx_size_1023)},
575         {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
576                 tx_size_1522)},
577         {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
578                 tx_size_big)},
579         {"rx_flow_director_atr_match_packets",
580                 offsetof(struct i40e_hw_port_stats, fd_atr_match)},
581         {"rx_flow_director_sb_match_packets",
582                 offsetof(struct i40e_hw_port_stats, fd_sb_match)},
583         {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
584                 tx_lpi_status)},
585         {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
586                 rx_lpi_status)},
587         {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
588                 tx_lpi_count)},
589         {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
590                 rx_lpi_count)},
591 };
592
593 #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
594                 sizeof(rte_i40e_hw_port_strings[0]))
595
596 static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
597         {"xon_packets", offsetof(struct i40e_hw_port_stats,
598                 priority_xon_rx)},
599         {"xoff_packets", offsetof(struct i40e_hw_port_stats,
600                 priority_xoff_rx)},
601 };
602
603 #define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
604                 sizeof(rte_i40e_rxq_prio_strings[0]))
605
606 static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
607         {"xon_packets", offsetof(struct i40e_hw_port_stats,
608                 priority_xon_tx)},
609         {"xoff_packets", offsetof(struct i40e_hw_port_stats,
610                 priority_xoff_tx)},
611         {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
612                 priority_xon_2_xoff)},
613 };
614
615 #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
616                 sizeof(rte_i40e_txq_prio_strings[0]))
617
618 static struct eth_driver rte_i40e_pmd = {
619         .pci_drv = {
620                 .name = "rte_i40e_pmd",
621                 .id_table = pci_id_i40e_map,
622                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
623                         RTE_PCI_DRV_DETACHABLE,
624         },
625         .eth_dev_init = eth_i40e_dev_init,
626         .eth_dev_uninit = eth_i40e_dev_uninit,
627         .dev_private_size = sizeof(struct i40e_adapter),
628 };
629
630 static inline int
631 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
632                                      struct rte_eth_link *link)
633 {
634         struct rte_eth_link *dst = link;
635         struct rte_eth_link *src = &(dev->data->dev_link);
636
637         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
638                                         *(uint64_t *)src) == 0)
639                 return -1;
640
641         return 0;
642 }
643
644 static inline int
645 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
646                                       struct rte_eth_link *link)
647 {
648         struct rte_eth_link *dst = &(dev->data->dev_link);
649         struct rte_eth_link *src = link;
650
651         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
652                                         *(uint64_t *)src) == 0)
653                 return -1;
654
655         return 0;
656 }
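/*
 * Note, not in the original source: both helpers above copy the 64-bit
 * rte_eth_link atomically. Passing the freshly read *dst as the expected
 * value makes rte_atomic64_cmpset() act as an atomic 64-bit store that
 * fails (returns 0) only if another thread updated the link status between
 * the read and the compare-and-set; the helpers then return -1 so the
 * caller can detect the race.
 */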
657
658 /*
659  * Driver initialization routine.
660  * Invoked once at EAL init time.
661  * Register itself as the [Poll Mode] Driver of PCI i40e devices.
662  */
663 static int
664 rte_i40e_pmd_init(const char *name __rte_unused,
665                   const char *params __rte_unused)
666 {
667         PMD_INIT_FUNC_TRACE();
668         rte_eth_driver_register(&rte_i40e_pmd);
669
670         return 0;
671 }
672
673 static struct rte_driver rte_i40e_driver = {
674         .type = PMD_PDEV,
675         .init = rte_i40e_pmd_init,
676 };
677
678 PMD_REGISTER_DRIVER(rte_i40e_driver);
679
680 /*
681  * Initialize registers for flexible payload, which should be set by NVM.
682  * This should be removed from the code once it is fixed in NVM.
683  */
684 #ifndef I40E_GLQF_ORT
685 #define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
686 #endif
687 #ifndef I40E_GLQF_PIT
688 #define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
689 #endif
690
691 static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
692 {
693         I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
694         I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
695         I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
696         I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
697         I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
698         I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
699         I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
700         I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
701         I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
702         I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
703
704         /* GLQF_PIT Registers */
705         I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
706         I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
707 }
708
709 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
710
711 /*
712  * Add an ethertype filter to drop all flow control frames transmitted
713  * from VSIs.
714  */
715 static void
716 i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
717 {
718         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
719         uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
720                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
721                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
722         int ret;
723
724         ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
725                                 I40E_FLOW_CONTROL_ETHERTYPE, flags,
726                                 pf->main_vsi_seid, 0,
727                                 TRUE, NULL, NULL);
728         if (ret)
729                 PMD_INIT_LOG(ERR, "Failed to add filter to drop flow "
730                                   "control frames from VSIs.");
731 }
732
733 static int
734 eth_i40e_dev_init(struct rte_eth_dev *dev)
735 {
736         struct rte_pci_device *pci_dev;
737         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
738         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
739         struct i40e_vsi *vsi;
740         int ret;
741         uint32_t len;
742         uint8_t aq_fail = 0;
743
744         PMD_INIT_FUNC_TRACE();
745
746         dev->dev_ops = &i40e_eth_dev_ops;
747         dev->rx_pkt_burst = i40e_recv_pkts;
748         dev->tx_pkt_burst = i40e_xmit_pkts;
749
750         /* For secondary processes, we don't initialise any further, as the
751          * primary has already done this work. Only check whether we need a
752          * different RX function. */
753         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
754                 i40e_set_rx_function(dev);
755                 i40e_set_tx_function(dev);
756                 return 0;
757         }
758         pci_dev = dev->pci_dev;
759
760         rte_eth_copy_pci_info(dev, pci_dev);
761
762         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
763         pf->adapter->eth_dev = dev;
764         pf->dev_data = dev->data;
765
766         hw->back = I40E_PF_TO_ADAPTER(pf);
767         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
768         if (!hw->hw_addr) {
769                 PMD_INIT_LOG(ERR, "Hardware is not available, "
770                              "as the address is NULL");
771                 return -ENODEV;
772         }
773
774         hw->vendor_id = pci_dev->id.vendor_id;
775         hw->device_id = pci_dev->id.device_id;
776         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
777         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
778         hw->bus.device = pci_dev->addr.devid;
779         hw->bus.func = pci_dev->addr.function;
780         hw->adapter_stopped = 0;
781
782         /* Make sure all is clean before doing PF reset */
783         i40e_clear_hw(hw);
784
785         /* Initialize the hardware */
786         i40e_hw_init(dev);
787
788         /* Reset here to make sure all is clean for each PF */
789         ret = i40e_pf_reset(hw);
790         if (ret) {
791                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
792                 return ret;
793         }
794
795         /* Initialize the shared code (base driver) */
796         ret = i40e_init_shared_code(hw);
797         if (ret) {
798                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
799                 return ret;
800         }
801
802         /*
803          * To work around the NVM issue, initialize the registers
804          * for flexible payload in software.
805          * This should be removed once the issue is fixed in NVM.
806          */
807         i40e_flex_payload_reg_init(hw);
808
809         /* Initialize the parameters for adminq */
810         i40e_init_adminq_parameter(hw);
811         ret = i40e_init_adminq(hw);
812         if (ret != I40E_SUCCESS) {
813                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
814                 return -EIO;
815         }
816         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
817                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
818                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
819                      ((hw->nvm.version >> 12) & 0xf),
820                      ((hw->nvm.version >> 4) & 0xff),
821                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
822
823         /* Clear PXE mode */
824         i40e_clear_pxe_mode(hw);
825
826         /*
827          * On X710, the performance numbers fall far short of expectations on
828          * recent firmware versions, and the fix for this issue may not be
829          * integrated in the next firmware release. A workaround in the
830          * software driver is therefore needed: it modifies the initial values
831          * of three internal-only registers. Note that the workaround can be
832          * removed once this is fixed in firmware.
833          */
834         i40e_configure_registers(hw);
835
836         /* Get hw capabilities */
837         ret = i40e_get_cap(hw);
838         if (ret != I40E_SUCCESS) {
839                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
840                 goto err_get_capabilities;
841         }
842
843         /* Initialize parameters for PF */
844         ret = i40e_pf_parameter_init(dev);
845         if (ret != 0) {
846                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
847                 goto err_parameter_init;
848         }
849
850         /* Initialize the queue management */
851         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
852         if (ret < 0) {
853                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
854                 goto err_qp_pool_init;
855         }
856         ret = i40e_res_pool_init(&pf->msix_pool, 1,
857                                 hw->func_caps.num_msix_vectors - 1);
858         if (ret < 0) {
859                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
860                 goto err_msix_pool_init;
861         }
862
863         /* Initialize lan hmc */
864         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
865                                 hw->func_caps.num_rx_qp, 0, 0);
866         if (ret != I40E_SUCCESS) {
867                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
868                 goto err_init_lan_hmc;
869         }
870
871         /* Configure lan hmc */
872         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
873         if (ret != I40E_SUCCESS) {
874                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
875                 goto err_configure_lan_hmc;
876         }
877
878         /* Get and check the mac address */
879         i40e_get_mac_addr(hw, hw->mac.addr);
880         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
881                 PMD_INIT_LOG(ERR, "mac address is not valid");
882                 ret = -EIO;
883                 goto err_get_mac_addr;
884         }
885         /* Copy the permanent MAC address */
886         ether_addr_copy((struct ether_addr *) hw->mac.addr,
887                         (struct ether_addr *) hw->mac.perm_addr);
888
889         /* Disable flow control */
890         hw->fc.requested_mode = I40E_FC_NONE;
891         i40e_set_fc(hw, &aq_fail, TRUE);
892
893         /* Set the global registers with default ether type value */
894         ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
895         if (ret != I40E_SUCCESS) {
896                 PMD_INIT_LOG(ERR, "Failed to set the default outer "
897                              "VLAN ether type");
898                 goto err_setup_pf_switch;
899         }
900         ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER, ETHER_TYPE_VLAN);
901         if (ret != I40E_SUCCESS) {
902                 PMD_INIT_LOG(ERR, "Failed to set the default inner "
903                              "VLAN ether type");
904                 goto err_setup_pf_switch;
905         }
906
907         /* PF setup, which includes VSI setup */
908         ret = i40e_pf_setup(pf);
909         if (ret) {
910                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
911                 goto err_setup_pf_switch;
912         }
913
914         vsi = pf->main_vsi;
915
916         /* Disable double vlan by default */
917         i40e_vsi_config_double_vlan(vsi, FALSE);
918
919         if (!vsi->max_macaddrs)
920                 len = ETHER_ADDR_LEN;
921         else
922                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
923
924         /* Should be after VSI initialized */
925         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
926         if (!dev->data->mac_addrs) {
927                 PMD_INIT_LOG(ERR, "Failed to allocate memory "
928                                         "for storing the MAC address");
                ret = -ENOMEM;
929                 goto err_mac_alloc;
930         }
931         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
932                                         &dev->data->mac_addrs[0]);
933
934         /* initialize pf host driver to set up SRIOV resources if applicable */
935         i40e_pf_host_init(dev);
936
937         /* register callback func to eal lib */
938         rte_intr_callback_register(&(pci_dev->intr_handle),
939                 i40e_dev_interrupt_handler, (void *)dev);
940
941         /* configure and enable device interrupt */
942         i40e_pf_config_irq0(hw, TRUE);
943         i40e_pf_enable_irq0(hw);
944
945         /* enable uio intr after callback register */
946         rte_intr_enable(&(pci_dev->intr_handle));
947         /*
948          * Add an ethertype filter to drop all flow control frames transmitted
949          * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
950          * frames to wire.
951          */
952         i40e_add_tx_flow_control_drop_filter(pf);
953
954         /* Set the max frame size to 0x2600 by default,
955          * in case other drivers changed the default value.
956          */
957         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
958
959         /* initialize mirror rule list */
960         TAILQ_INIT(&pf->mirror_list);
961
962         /* Init dcb to sw mode by default */
963         ret = i40e_dcb_init_configure(dev, TRUE);
964         if (ret != I40E_SUCCESS) {
965                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
966                 pf->flags &= ~I40E_FLAG_DCB;
967         }
968
969         return 0;
970
971 err_mac_alloc:
972         i40e_vsi_release(pf->main_vsi);
973 err_setup_pf_switch:
974 err_get_mac_addr:
975 err_configure_lan_hmc:
976         (void)i40e_shutdown_lan_hmc(hw);
977 err_init_lan_hmc:
978         i40e_res_pool_destroy(&pf->msix_pool);
979 err_msix_pool_init:
980         i40e_res_pool_destroy(&pf->qp_pool);
981 err_qp_pool_init:
982 err_parameter_init:
983 err_get_capabilities:
984         (void)i40e_shutdown_adminq(hw);
985
986         return ret;
987 }
988
989 static int
990 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
991 {
992         struct rte_pci_device *pci_dev;
993         struct i40e_hw *hw;
994         struct i40e_filter_control_settings settings;
995         int ret;
996         uint8_t aq_fail = 0;
997
998         PMD_INIT_FUNC_TRACE();
999
1000         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1001                 return 0;
1002
1003         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1004         pci_dev = dev->pci_dev;
1005
1006         if (hw->adapter_stopped == 0)
1007                 i40e_dev_close(dev);
1008
1009         dev->dev_ops = NULL;
1010         dev->rx_pkt_burst = NULL;
1011         dev->tx_pkt_burst = NULL;
1012
1013         /* Disable LLDP */
1014         ret = i40e_aq_stop_lldp(hw, true, NULL);
1015         if (ret != I40E_SUCCESS) /* Its failure can be ignored */
1016                 PMD_INIT_LOG(INFO, "Failed to stop lldp");
1017
1018         /* Clear PXE mode */
1019         i40e_clear_pxe_mode(hw);
1020
1021         /* Unconfigure filter control */
1022         memset(&settings, 0, sizeof(settings));
1023         ret = i40e_set_filter_control(hw, &settings);
1024         if (ret)
1025                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1026                                         ret);
1027
1028         /* Disable flow control */
1029         hw->fc.requested_mode = I40E_FC_NONE;
1030         i40e_set_fc(hw, &aq_fail, TRUE);
1031
1032         /* uninitialize pf host driver */
1033         i40e_pf_host_uninit(dev);
1034
1035         rte_free(dev->data->mac_addrs);
1036         dev->data->mac_addrs = NULL;
1037
1038         /* disable uio intr before callback unregister */
1039         rte_intr_disable(&(pci_dev->intr_handle));
1040
1041         /* unregister callback func from eal lib */
1042         rte_intr_callback_unregister(&(pci_dev->intr_handle),
1043                 i40e_dev_interrupt_handler, (void *)dev);
1044
1045         return 0;
1046 }
1047
1048 static int
1049 i40e_dev_configure(struct rte_eth_dev *dev)
1050 {
1051         struct i40e_adapter *ad =
1052                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1053         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1054         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1055         int i, ret;
1056
1057         /* Initialize to TRUE. If any of the Rx queues doesn't meet the
1058          * bulk allocation or vector Rx preconditions, we will reset it.
1059          */
1060         ad->rx_bulk_alloc_allowed = true;
1061         ad->rx_vec_allowed = true;
1062         ad->tx_simple_allowed = true;
1063         ad->tx_vec_allowed = true;
1064
1065         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1066                 ret = i40e_fdir_setup(pf);
1067                 if (ret != I40E_SUCCESS) {
1068                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1069                         return -ENOTSUP;
1070                 }
1071                 ret = i40e_fdir_configure(dev);
1072                 if (ret < 0) {
1073                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1074                         goto err;
1075                 }
1076         } else
1077                 i40e_fdir_teardown(pf);
1078
1079         ret = i40e_dev_init_vlan(dev);
1080         if (ret < 0)
1081                 goto err;
1082
1083         /* VMDQ setup.
1084          *  The VMDQ setting needs to be moved out of i40e_pf_config_mq_rx(),
1085          *  as VMDQ and RSS have different configuration requirements.
1086          *  The general PMD call sequence is NIC init, configure,
1087          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up the
1088          *  VSI that a specific queue belongs to when VMDQ is applicable, so
1089          *  the VMDQ setting has to be done before rx/tx_queue_setup(); this
1090          *  function is a good place for vmdq_setup.
1091          *  RSS setup needs the actual number of configured RX queues, which
1092          *  is only available after rx_queue_setup(), so dev_start() is a
1093          *  good place for the RSS setup.
1094          */
1095         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1096                 ret = i40e_vmdq_setup(dev);
1097                 if (ret)
1098                         goto err;
1099         }
1100
1101         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1102                 ret = i40e_dcb_setup(dev);
1103                 if (ret) {
1104                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1105                         goto err_dcb;
1106                 }
1107         }
1108
1109         return 0;
1110
1111 err_dcb:
1112         /* need to release vmdq resources if they exist */
1113         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1114                 i40e_vsi_release(pf->vmdq[i].vsi);
1115                 pf->vmdq[i].vsi = NULL;
1116         }
1117         rte_free(pf->vmdq);
1118         pf->vmdq = NULL;
1119 err:
1120         /* need to release fdir resources if they exist */
1121         i40e_fdir_teardown(pf);
1122         return ret;
1123 }
1124
1125 void
1126 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1127 {
1128         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1129         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1130         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1131         uint16_t msix_vect = vsi->msix_intr;
1132         uint16_t i;
1133
1134         for (i = 0; i < vsi->nb_qps; i++) {
1135                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1136                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1137                 rte_wmb();
1138         }
1139
1140         if (vsi->type != I40E_VSI_SRIOV) {
1141                 if (!rte_intr_allow_others(intr_handle)) {
1142                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1143                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1144                         I40E_WRITE_REG(hw,
1145                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1146                                        0);
1147                 } else {
1148                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1149                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1150                         I40E_WRITE_REG(hw,
1151                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1152                                                        msix_vect - 1), 0);
1153                 }
1154         } else {
1155                 uint32_t reg;
1156                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1157                         vsi->user_param + (msix_vect - 1);
1158
1159                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1160                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1161         }
1162         I40E_WRITE_FLUSH(hw);
1163 }
1164
1165 static void
1166 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1167                        int base_queue, int nb_queue)
1168 {
1169         int i;
1170         uint32_t val;
1171         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1172
1173         /* Bind all RX queues to allocated MSIX interrupt */
1174         for (i = 0; i < nb_queue; i++) {
1175                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1176                         I40E_QINT_RQCTL_ITR_INDX_MASK |
1177                         ((base_queue + i + 1) <<
1178                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1179                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1180                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1181
1182                 if (i == nb_queue - 1)
1183                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1184                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1185         }
1186
1187         /* Write first RX queue to Link list register as the head element */
1188         if (vsi->type != I40E_VSI_SRIOV) {
1189                 uint16_t interval =
1190                         i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
1191
1192                 if (msix_vect == I40E_MISC_VEC_ID) {
1193                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1194                                        (base_queue <<
1195                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1196                                        (0x0 <<
1197                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1198                         I40E_WRITE_REG(hw,
1199                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1200                                        interval);
1201                 } else {
1202                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1203                                        (base_queue <<
1204                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1205                                        (0x0 <<
1206                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1207                         I40E_WRITE_REG(hw,
1208                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1209                                                        msix_vect - 1),
1210                                        interval);
1211                 }
1212         } else {
1213                 uint32_t reg;
1214
1215                 if (msix_vect == I40E_MISC_VEC_ID) {
1216                         I40E_WRITE_REG(hw,
1217                                        I40E_VPINT_LNKLST0(vsi->user_param),
1218                                        (base_queue <<
1219                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1220                                        (0x0 <<
1221                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1222                 } else {
1223                         /* num_msix_vectors_vf includes irq0; subtract it to index the data vectors */
1224                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1225                                 vsi->user_param + (msix_vect - 1);
1226
1227                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1228                                        (base_queue <<
1229                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1230                                        (0x0 <<
1231                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1232                 }
1233         }
1234
1235         I40E_WRITE_FLUSH(hw);
1236 }
1237
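/*
 * Worked example for the SRIOV branch above (numbers assumed for
 * illustration): the VPINT_LNKLSTN index is
 *     reg = (num_msix_vectors_vf - 1) * vf_id + (msix_vect - 1)
 * With num_msix_vectors_vf = 5 (irq0 plus four data vectors), vf_id = 2
 * and msix_vect = 3, this selects entry (5 - 1) * 2 + (3 - 1) = 10, i.e.
 * the third data-vector slot in the second VF's block of four entries.
 */
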
1238 void
1239 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
1240 {
1241         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1242         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1243         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1244         uint16_t msix_vect = vsi->msix_intr;
1245         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1246         uint16_t queue_idx = 0;
1247         int record = 0;
1248         uint32_t val;
1249         int i;
1250
1251         for (i = 0; i < vsi->nb_qps; i++) {
1252                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1253                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1254         }
1255
1256         /* Disable auto-masking so the INTENA flag is not auto-cleared on interrupt */
1257         val = I40E_READ_REG(hw, I40E_GLINT_CTL);
1258         val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
1259                 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
1260                 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
1261         I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
1262
1263         /* VF bind interrupt */
1264         if (vsi->type == I40E_VSI_SRIOV) {
1265                 __vsi_queues_bind_intr(vsi, msix_vect,
1266                                        vsi->base_queue, vsi->nb_qps);
1267                 return;
1268         }
1269
1270         /* PF & VMDq bind interrupt */
1271         if (rte_intr_dp_is_en(intr_handle)) {
1272                 if (vsi->type == I40E_VSI_MAIN) {
1273                         queue_idx = 0;
1274                         record = 1;
1275                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1276                         struct i40e_vsi *main_vsi =
1277                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1278                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1279                         record = 1;
1280                 }
1281         }
1282
1283         for (i = 0; i < vsi->nb_used_qps; i++) {
1284                 if (nb_msix <= 1) {
1285                         if (!rte_intr_allow_others(intr_handle))
1286                                 /* allow sharing MISC_VEC_ID */
1287                                 msix_vect = I40E_MISC_VEC_ID;
1288
1289                         /* not enough MSIX vectors, map all queues to one */
1290                         __vsi_queues_bind_intr(vsi, msix_vect,
1291                                                vsi->base_queue + i,
1292                                                vsi->nb_used_qps - i);
1293                 for (; record && i < vsi->nb_used_qps; i++)
1294                                 intr_handle->intr_vec[queue_idx + i] =
1295                                         msix_vect;
1296                         break;
1297                 }
1298                 /* 1:1 queue/msix_vect mapping */
1299                 __vsi_queues_bind_intr(vsi, msix_vect,
1300                                        vsi->base_queue + i, 1);
1301                 if (record)
1302                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1303
1304                 msix_vect++;
1305                 nb_msix--;
1306         }
1307 }
1308
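/*
 * Mapping example for the loop above (values assumed): with
 * nb_used_qps = 4 and nb_msix = 2, iteration 0 binds queue 0 to the first
 * vector 1:1; on iteration 1 only one vector remains, so the nb_msix <= 1
 * branch maps the remaining queues 1..3 to the second vector in one call
 * and the loop breaks.
 */
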
1309 static void
1310 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1311 {
1312         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1313         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1314         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1315         uint16_t interval =
1316                 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
1317         uint16_t msix_intr, i;
1318
1319         if (rte_intr_allow_others(intr_handle))
1320                 for (i = 0; i < vsi->nb_msix; i++) {
1321                         msix_intr = vsi->msix_intr + i;
1322                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1323                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1324                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1325                                 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1326                                 (interval <<
1327                                  I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
1328                 }
1329         else
1330                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1331                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1332                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1333                                (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
1334                                (interval <<
1335                                 I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
1336
1337         I40E_WRITE_FLUSH(hw);
1338 }
1339
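/*
 * Note (assumption, not stated in this file): the DYN_CTL INTERVAL fields
 * count in 2-microsecond units, so i40e_calc_itr_interval() is expected to
 * convert RTE_LIBRTE_I40E_ITR_INTERVAL into that granularity before it is
 * written above.
 */
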
1340 static void
1341 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1342 {
1343         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1344         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1345         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1346         uint16_t msix_intr, i;
1347
1348         if (rte_intr_allow_others(intr_handle))
1349                 for (i = 0; i < vsi->nb_msix; i++) {
1350                         msix_intr = vsi->msix_intr + i;
1351                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1352                                        0);
1353                 }
1354         else
1355                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
1356
1357         I40E_WRITE_FLUSH(hw);
1358 }
1359
1360 static inline uint8_t
1361 i40e_parse_link_speed(uint16_t eth_link_speed)
1362 {
1363         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1364
1365         switch (eth_link_speed) {
1366         case ETH_LINK_SPEED_40G:
1367                 link_speed = I40E_LINK_SPEED_40GB;
1368                 break;
1369         case ETH_LINK_SPEED_20G:
1370                 link_speed = I40E_LINK_SPEED_20GB;
1371                 break;
1372         case ETH_LINK_SPEED_10G:
1373                 link_speed = I40E_LINK_SPEED_10GB;
1374                 break;
1375         case ETH_LINK_SPEED_1000:
1376                 link_speed = I40E_LINK_SPEED_1GB;
1377                 break;
1378         case ETH_LINK_SPEED_100:
1379                 link_speed = I40E_LINK_SPEED_100MB;
1380                 break;
1381         }
1382
1383         return link_speed;
1384 }
1385
1386 static int
1387 i40e_phy_conf_link(__rte_unused struct i40e_hw *hw,
1388                    __rte_unused uint8_t abilities,
1389                    __rte_unused uint8_t force_speed)
1390 {
1391         /* Skip any PHY configuration on both 10G and 40G interfaces, as a
1392          * workaround for a link control limitation: all link control must
1393          * be handled by firmware. Revisit this if future firmware versions
1394          * open link control to the software driver.
1395          */
1396         return I40E_SUCCESS;
1397 }
1398
1399 static int
1400 i40e_apply_link_speed(struct rte_eth_dev *dev)
1401 {
1402         uint8_t speed;
1403         uint8_t abilities = 0;
1404         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1405         struct rte_eth_conf *conf = &dev->data->dev_conf;
1406
1407         speed = i40e_parse_link_speed(conf->link_speed);
1408         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1409         if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
1410                 abilities |= I40E_AQ_PHY_AN_ENABLED;
1411         else
1412                 abilities |= I40E_AQ_PHY_LINK_ENABLED;
1413
1414         return i40e_phy_conf_link(hw, abilities, speed);
1415 }
1416
1417 static int
1418 i40e_dev_start(struct rte_eth_dev *dev)
1419 {
1420         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1421         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1422         struct i40e_vsi *main_vsi = pf->main_vsi;
1423         int ret, i;
1424         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1425         uint32_t intr_vector = 0;
1426
1427         hw->adapter_stopped = 0;
1428
1429         if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
1430                 (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
1431                 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
1432                              dev->data->dev_conf.link_duplex,
1433                              dev->data->port_id);
1434                 return -EINVAL;
1435         }
1436
1437         rte_intr_disable(intr_handle);
1438
1439         if ((rte_intr_cap_multiple(intr_handle) ||
1440              !RTE_ETH_DEV_SRIOV(dev).active) &&
1441             dev->data->dev_conf.intr_conf.rxq != 0) {
1442                 intr_vector = dev->data->nb_rx_queues;
1443                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1444                         return -1;
1445         }
1446
1447         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1448                 intr_handle->intr_vec =
1449                         rte_zmalloc("intr_vec",
1450                                     dev->data->nb_rx_queues * sizeof(int),
1451                                     0);
1452                 if (!intr_handle->intr_vec) {
1453                         PMD_INIT_LOG(ERR, "Failed to allocate intr_vec for"
1454                                      " %d rx_queues", dev->data->nb_rx_queues);
1455                         return -ENOMEM;
1456                 }
1457         }
1458
1459         /* Initialize the RX/TX queues */
1460         ret = i40e_dev_rxtx_init(pf);
1461         if (ret != I40E_SUCCESS) {
1462                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
1463                 goto err_up;
1464         }
1465
1466         /* Map queues with MSIX interrupt */
1467         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
1468                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1469         i40e_vsi_queues_bind_intr(main_vsi);
1470         i40e_vsi_enable_queues_intr(main_vsi);
1471
1472         /* Map VMDQ VSI queues with MSIX interrupt */
1473         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1474                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1475                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
1476                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
1477         }
1478
1479         /* enable FDIR MSIX interrupt */
1480         if (pf->fdir.fdir_vsi) {
1481                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
1482                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
1483         }
1484
1485         /* Enable all queues which have been configured */
1486         ret = i40e_dev_switch_queues(pf, TRUE);
1487         if (ret != I40E_SUCCESS) {
1488                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
1489                 goto err_up;
1490         }
1491
1492         /* Enable receiving broadcast packets */
1493         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
1494         if (ret != I40E_SUCCESS)
1495                 PMD_DRV_LOG(INFO, "Failed to enable VSI broadcast");
1496
1497         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1498                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
1499                                                 true, NULL);
1500                 if (ret != I40E_SUCCESS)
1501                         PMD_DRV_LOG(INFO, "Failed to enable VSI broadcast");
1502         }
1503
1504         /* Apply link configuration */
1505         ret = i40e_apply_link_speed(dev);
1506         if (I40E_SUCCESS != ret) {
1507                 PMD_DRV_LOG(ERR, "Failed to apply link settings");
1508                 goto err_up;
1509         }
1510
1511         if (!rte_intr_allow_others(intr_handle)) {
1512                 rte_intr_callback_unregister(intr_handle,
1513                                              i40e_dev_interrupt_handler,
1514                                              (void *)dev);
1515                 /* configure and enable device interrupt */
1516                 i40e_pf_config_irq0(hw, FALSE);
1517                 i40e_pf_enable_irq0(hw);
1518
1519                 if (dev->data->dev_conf.intr_conf.lsc != 0)
1520                         PMD_INIT_LOG(INFO, "LSC interrupt cannot be enabled:"
1521                                      " no interrupt multiplexing");
1522         }
1523
1524         /* enable uio intr after callback register */
1525         rte_intr_enable(intr_handle);
1526
1527         return I40E_SUCCESS;
1528
1529 err_up:
1530         i40e_dev_switch_queues(pf, FALSE);
1531         i40e_dev_clear_queues(dev);
1532
1533         return ret;
1534 }
1535
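/*
 * Usage sketch for i40e_dev_start() above (application side, illustrative
 * only; port_conf, socket and mbuf_pool are assumed to exist):
 *
 *     ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *     ret = rte_eth_rx_queue_setup(port_id, 0, 512, socket, NULL, mbuf_pool);
 *     ret = rte_eth_tx_queue_setup(port_id, 0, 512, socket, NULL);
 *     ret = rte_eth_dev_start(port_id);   <-- dispatches here via dev_ops
 */
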
1536 static void
1537 i40e_dev_stop(struct rte_eth_dev *dev)
1538 {
1539         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1540         struct i40e_vsi *main_vsi = pf->main_vsi;
1541         struct i40e_mirror_rule *p_mirror;
1542         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1543         int i;
1544
1545         /* Disable all queues */
1546         i40e_dev_switch_queues(pf, FALSE);
1547
1548         /* un-map queues with interrupt registers */
1549         i40e_vsi_disable_queues_intr(main_vsi);
1550         i40e_vsi_queues_unbind_intr(main_vsi);
1551
1552         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1553                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
1554                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
1555         }
1556
1557         if (pf->fdir.fdir_vsi) {
1558                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
1559                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
1560         }
1561         /* Clear all queues and release memory */
1562         i40e_dev_clear_queues(dev);
1563
1564         /* Set link down */
1565         i40e_dev_set_link_down(dev);
1566
1567         /* Remove all mirror rules */
1568         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
1569                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
1570                 rte_free(p_mirror);
1571         }
1572         pf->nb_mirror_rule = 0;
1573
1574         if (!rte_intr_allow_others(intr_handle))
1575                 /* resume to the default handler */
1576                 rte_intr_callback_register(intr_handle,
1577                                            i40e_dev_interrupt_handler,
1578                                            (void *)dev);
1579
1580         /* Clean datapath event and queue/vec mapping */
1581         rte_intr_efd_disable(intr_handle);
1582         if (intr_handle->intr_vec) {
1583                 rte_free(intr_handle->intr_vec);
1584                 intr_handle->intr_vec = NULL;
1585         }
1586 }
1587
1588 static void
1589 i40e_dev_close(struct rte_eth_dev *dev)
1590 {
1591         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1592         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1593         uint32_t reg;
1594         int i;
1595
1596         PMD_INIT_FUNC_TRACE();
1597
1598         i40e_dev_stop(dev);
1599         hw->adapter_stopped = 1;
1600         i40e_dev_free_queues(dev);
1601
1602         /* Disable interrupt */
1603         i40e_pf_disable_irq0(hw);
1604         rte_intr_disable(&(dev->pci_dev->intr_handle));
1605
1606         /* shutdown and destroy the HMC */
1607         i40e_shutdown_lan_hmc(hw);
1608
1609         /* release all the existing VSIs and VEBs */
1610         i40e_fdir_teardown(pf);
1611         i40e_vsi_release(pf->main_vsi);
1612
1613         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1614                 i40e_vsi_release(pf->vmdq[i].vsi);
1615                 pf->vmdq[i].vsi = NULL;
1616         }
1617
1618         rte_free(pf->vmdq);
1619         pf->vmdq = NULL;
1620
1621         /* shutdown the adminq */
1622         i40e_aq_queue_shutdown(hw, true);
1623         i40e_shutdown_adminq(hw);
1624
1625         i40e_res_pool_destroy(&pf->qp_pool);
1626         i40e_res_pool_destroy(&pf->msix_pool);
1627
1628         /* force a PF reset to clean anything leftover */
1629         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
1630         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
1631                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1632         I40E_WRITE_FLUSH(hw);
1633 }
1634
1635 static void
1636 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
1637 {
1638         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1639         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1640         struct i40e_vsi *vsi = pf->main_vsi;
1641         int status;
1642
1643         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1644                                                         true, NULL);
1645         if (status != I40E_SUCCESS)
1646                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
1647
1648         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1649                                                         TRUE, NULL);
1650         if (status != I40E_SUCCESS)
1651                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
1652
1653 }
1654
1655 static void
1656 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
1657 {
1658         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1659         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1660         struct i40e_vsi *vsi = pf->main_vsi;
1661         int status;
1662
1663         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1664                                                         false, NULL);
1665         if (status != I40E_SUCCESS)
1666                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
1667
1668         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1669                                                         false, NULL);
1670         if (status != I40E_SUCCESS)
1671                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
1672 }
1673
1674 static void
1675 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
1676 {
1677         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1678         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1679         struct i40e_vsi *vsi = pf->main_vsi;
1680         int ret;
1681
1682         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
1683         if (ret != I40E_SUCCESS)
1684                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
1685 }
1686
1687 static void
1688 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
1689 {
1690         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1691         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1692         struct i40e_vsi *vsi = pf->main_vsi;
1693         int ret;
1694
1695         if (dev->data->promiscuous == 1)
1696                 return; /* must remain in all_multicast mode */
1697
1698         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
1699                                 vsi->seid, FALSE, NULL);
1700         if (ret != I40E_SUCCESS)
1701                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
1702 }
1703
1704 /*
1705  * Set device link up.
1706  */
1707 static int
1708 i40e_dev_set_link_up(struct rte_eth_dev *dev)
1709 {
1710         /* re-apply link speed setting */
1711         return i40e_apply_link_speed(dev);
1712 }
1713
1714 /*
1715  * Set device link down.
1716  */
1717 static int
1718 i40e_dev_set_link_down(struct rte_eth_dev *dev)
1719 {
1720         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
1721         uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1722         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1723
1724         return i40e_phy_conf_link(hw, abilities, speed);
1725 }
1726
1727 int
1728 i40e_dev_link_update(struct rte_eth_dev *dev,
1729                      int wait_to_complete)
1730 {
1731 #define CHECK_INTERVAL 100  /* 100ms */
1732 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
1733         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1734         struct i40e_link_status link_status;
1735         struct rte_eth_link link, old;
1736         int status;
1737         unsigned rep_cnt = MAX_REPEAT_TIME;
1738
1739         memset(&link, 0, sizeof(link));
1740         memset(&old, 0, sizeof(old));
1741         memset(&link_status, 0, sizeof(link_status));
1742         rte_i40e_dev_atomic_read_link_status(dev, &old);
1743
1744         do {
1745                 /* Get link status information from hardware */
1746                 status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
1747                 if (status != I40E_SUCCESS) {
1748                         link.link_speed = ETH_LINK_SPEED_100;
1749                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1750                         PMD_DRV_LOG(ERR, "Failed to get link info");
1751                         goto out;
1752                 }
1753
1754                 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
1755                 if (!wait_to_complete)
1756                         break;
1757
1758                 rte_delay_ms(CHECK_INTERVAL);
1759         } while (!link.link_status && rep_cnt--);
1760
1761         if (!link.link_status)
1762                 goto out;
1763
1764         /* i40e uses full duplex only */
1765         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1766
1767         /* Parse the link status */
1768         switch (link_status.link_speed) {
1769         case I40E_LINK_SPEED_100MB:
1770                 link.link_speed = ETH_LINK_SPEED_100;
1771                 break;
1772         case I40E_LINK_SPEED_1GB:
1773                 link.link_speed = ETH_LINK_SPEED_1000;
1774                 break;
1775         case I40E_LINK_SPEED_10GB:
1776                 link.link_speed = ETH_LINK_SPEED_10G;
1777                 break;
1778         case I40E_LINK_SPEED_20GB:
1779                 link.link_speed = ETH_LINK_SPEED_20G;
1780                 break;
1781         case I40E_LINK_SPEED_40GB:
1782                 link.link_speed = ETH_LINK_SPEED_40G;
1783                 break;
1784         default:
1785                 link.link_speed = ETH_LINK_SPEED_100;
1786                 break;
1787         }
1788
1789 out:
1790         rte_i40e_dev_atomic_write_link_status(dev, &link);
1791         if (link.link_status == old.link_status)
1792                 return -1;
1793
1794         return 0;
1795 }
1796
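/*
 * Return convention of i40e_dev_link_update() above: it returns 0 when the
 * link status differs from the previously stored one and -1 when it is
 * unchanged, so callers can tell whether a link state transition happened.
 */
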
1797 /* Get all the statistics of a VSI */
1798 void
1799 i40e_update_vsi_stats(struct i40e_vsi *vsi)
1800 {
1801         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
1802         struct i40e_eth_stats *nes = &vsi->eth_stats;
1803         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1804         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
1805
1806         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
1807                             vsi->offset_loaded, &oes->rx_bytes,
1808                             &nes->rx_bytes);
1809         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
1810                             vsi->offset_loaded, &oes->rx_unicast,
1811                             &nes->rx_unicast);
1812         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
1813                             vsi->offset_loaded, &oes->rx_multicast,
1814                             &nes->rx_multicast);
1815         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
1816                             vsi->offset_loaded, &oes->rx_broadcast,
1817                             &nes->rx_broadcast);
1818         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
1819                             &oes->rx_discards, &nes->rx_discards);
1820         /* GLV_REPC not supported */
1821         /* GLV_RMPC not supported */
1822         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
1823                             &oes->rx_unknown_protocol,
1824                             &nes->rx_unknown_protocol);
1825         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
1826                             vsi->offset_loaded, &oes->tx_bytes,
1827                             &nes->tx_bytes);
1828         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
1829                             vsi->offset_loaded, &oes->tx_unicast,
1830                             &nes->tx_unicast);
1831         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
1832                             vsi->offset_loaded, &oes->tx_multicast,
1833                             &nes->tx_multicast);
1834         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
1835                             vsi->offset_loaded,  &oes->tx_broadcast,
1836                             &nes->tx_broadcast);
1837         /* GLV_TDPC not supported */
1838         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
1839                             &oes->tx_errors, &nes->tx_errors);
1840         vsi->offset_loaded = true;
1841
1842         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
1843                     vsi->vsi_id);
1844         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
1845         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
1846         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
1847         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
1848         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
1849         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
1850                     nes->rx_unknown_protocol);
1851         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
1852         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
1853         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
1854         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
1855         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
1856         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
1857         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
1858                     vsi->vsi_id);
1859 }
1860
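/*
 * Note (assumption about the helpers used above): i40e_stat_update_48()
 * combines the high/low register halves into one 48-bit counter and, with
 * the offset_loaded flag, accumulates deltas across register wrap-around,
 * so the nes/oes pairs hold running totals rather than raw snapshots.
 */
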
1861 static void
1862 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
1863 {
1864         unsigned int i;
1865         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
1866         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
1867
1868         /* Get statistics of struct i40e_eth_stats */
1869         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
1870                             I40E_GLPRT_GORCL(hw->port),
1871                             pf->offset_loaded, &os->eth.rx_bytes,
1872                             &ns->eth.rx_bytes);
1873         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
1874                             I40E_GLPRT_UPRCL(hw->port),
1875                             pf->offset_loaded, &os->eth.rx_unicast,
1876                             &ns->eth.rx_unicast);
1877         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
1878                             I40E_GLPRT_MPRCL(hw->port),
1879                             pf->offset_loaded, &os->eth.rx_multicast,
1880                             &ns->eth.rx_multicast);
1881         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
1882                             I40E_GLPRT_BPRCL(hw->port),
1883                             pf->offset_loaded, &os->eth.rx_broadcast,
1884                             &ns->eth.rx_broadcast);
1885         /* Workaround: CRC size should not be included in byte statistics,
1886          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
1887          */
1888         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
1889                 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
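        /* E.g. 1000 received frames shrink rx_bytes by
         * 1000 * ETHER_CRC_LEN = 4000 bytes, since ETHER_CRC_LEN is 4.
         */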
1890
1891         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
1892                             pf->offset_loaded, &os->eth.rx_discards,
1893                             &ns->eth.rx_discards);
1894         /* GLPRT_REPC not supported */
1895         /* GLPRT_RMPC not supported */
1896         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
1897                             pf->offset_loaded,
1898                             &os->eth.rx_unknown_protocol,
1899                             &ns->eth.rx_unknown_protocol);
1900         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
1901                             I40E_GLPRT_GOTCL(hw->port),
1902                             pf->offset_loaded, &os->eth.tx_bytes,
1903                             &ns->eth.tx_bytes);
1904         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
1905                             I40E_GLPRT_UPTCL(hw->port),
1906                             pf->offset_loaded, &os->eth.tx_unicast,
1907                             &ns->eth.tx_unicast);
1908         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
1909                             I40E_GLPRT_MPTCL(hw->port),
1910                             pf->offset_loaded, &os->eth.tx_multicast,
1911                             &ns->eth.tx_multicast);
1912         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
1913                             I40E_GLPRT_BPTCL(hw->port),
1914                             pf->offset_loaded, &os->eth.tx_broadcast,
1915                             &ns->eth.tx_broadcast);
1916         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
1917                 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
1918         /* GLPRT_TEPC not supported */
1919
1920         /* additional port specific stats */
1921         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
1922                             pf->offset_loaded, &os->tx_dropped_link_down,
1923                             &ns->tx_dropped_link_down);
1924         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
1925                             pf->offset_loaded, &os->crc_errors,
1926                             &ns->crc_errors);
1927         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
1928                             pf->offset_loaded, &os->illegal_bytes,
1929                             &ns->illegal_bytes);
1930         /* GLPRT_ERRBC not supported */
1931         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
1932                             pf->offset_loaded, &os->mac_local_faults,
1933                             &ns->mac_local_faults);
1934         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
1935                             pf->offset_loaded, &os->mac_remote_faults,
1936                             &ns->mac_remote_faults);
1937         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
1938                             pf->offset_loaded, &os->rx_length_errors,
1939                             &ns->rx_length_errors);
1940         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
1941                             pf->offset_loaded, &os->link_xon_rx,
1942                             &ns->link_xon_rx);
1943         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1944                             pf->offset_loaded, &os->link_xoff_rx,
1945                             &ns->link_xoff_rx);
1946         for (i = 0; i < 8; i++) {
1947                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1948                                     pf->offset_loaded,
1949                                     &os->priority_xon_rx[i],
1950                                     &ns->priority_xon_rx[i]);
1951                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1952                                     pf->offset_loaded,
1953                                     &os->priority_xoff_rx[i],
1954                                     &ns->priority_xoff_rx[i]);
1955         }
1956         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
1957                             pf->offset_loaded, &os->link_xon_tx,
1958                             &ns->link_xon_tx);
1959         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1960                             pf->offset_loaded, &os->link_xoff_tx,
1961                             &ns->link_xoff_tx);
1962         for (i = 0; i < 8; i++) {
1963                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1964                                     pf->offset_loaded,
1965                                     &os->priority_xon_tx[i],
1966                                     &ns->priority_xon_tx[i]);
1967                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1968                                     pf->offset_loaded,
1969                                     &os->priority_xoff_tx[i],
1970                                     &ns->priority_xoff_tx[i]);
1971                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1972                                     pf->offset_loaded,
1973                                     &os->priority_xon_2_xoff[i],
1974                                     &ns->priority_xon_2_xoff[i]);
1975         }
1976         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
1977                             I40E_GLPRT_PRC64L(hw->port),
1978                             pf->offset_loaded, &os->rx_size_64,
1979                             &ns->rx_size_64);
1980         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
1981                             I40E_GLPRT_PRC127L(hw->port),
1982                             pf->offset_loaded, &os->rx_size_127,
1983                             &ns->rx_size_127);
1984         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
1985                             I40E_GLPRT_PRC255L(hw->port),
1986                             pf->offset_loaded, &os->rx_size_255,
1987                             &ns->rx_size_255);
1988         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
1989                             I40E_GLPRT_PRC511L(hw->port),
1990                             pf->offset_loaded, &os->rx_size_511,
1991                             &ns->rx_size_511);
1992         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
1993                             I40E_GLPRT_PRC1023L(hw->port),
1994                             pf->offset_loaded, &os->rx_size_1023,
1995                             &ns->rx_size_1023);
1996         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
1997                             I40E_GLPRT_PRC1522L(hw->port),
1998                             pf->offset_loaded, &os->rx_size_1522,
1999                             &ns->rx_size_1522);
2000         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2001                             I40E_GLPRT_PRC9522L(hw->port),
2002                             pf->offset_loaded, &os->rx_size_big,
2003                             &ns->rx_size_big);
2004         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2005                             pf->offset_loaded, &os->rx_undersize,
2006                             &ns->rx_undersize);
2007         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2008                             pf->offset_loaded, &os->rx_fragments,
2009                             &ns->rx_fragments);
2010         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2011                             pf->offset_loaded, &os->rx_oversize,
2012                             &ns->rx_oversize);
2013         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2014                             pf->offset_loaded, &os->rx_jabber,
2015                             &ns->rx_jabber);
2016         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2017                             I40E_GLPRT_PTC64L(hw->port),
2018                             pf->offset_loaded, &os->tx_size_64,
2019                             &ns->tx_size_64);
2020         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2021                             I40E_GLPRT_PTC127L(hw->port),
2022                             pf->offset_loaded, &os->tx_size_127,
2023                             &ns->tx_size_127);
2024         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2025                             I40E_GLPRT_PTC255L(hw->port),
2026                             pf->offset_loaded, &os->tx_size_255,
2027                             &ns->tx_size_255);
2028         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2029                             I40E_GLPRT_PTC511L(hw->port),
2030                             pf->offset_loaded, &os->tx_size_511,
2031                             &ns->tx_size_511);
2032         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2033                             I40E_GLPRT_PTC1023L(hw->port),
2034                             pf->offset_loaded, &os->tx_size_1023,
2035                             &ns->tx_size_1023);
2036         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2037                             I40E_GLPRT_PTC1522L(hw->port),
2038                             pf->offset_loaded, &os->tx_size_1522,
2039                             &ns->tx_size_1522);
2040         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2041                             I40E_GLPRT_PTC9522L(hw->port),
2042                             pf->offset_loaded, &os->tx_size_big,
2043                             &ns->tx_size_big);
2044         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2045                            pf->offset_loaded,
2046                            &os->fd_sb_match, &ns->fd_sb_match);
2047         /* GLPRT_MSPDC not supported */
2048         /* GLPRT_XEC not supported */
2049
2050         pf->offset_loaded = true;
2051
2052         if (pf->main_vsi)
2053                 i40e_update_vsi_stats(pf->main_vsi);
2054 }
2055
2056 /* Get all statistics of a port */
2057 static void
2058 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2059 {
2060         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2061         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2062         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2063         unsigned i;
2064
2065         /* Read the HW registers to refresh the counters, then fill the stats struct */
2066         i40e_read_stats_registers(pf, hw);
2067
2068         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
2069                         pf->main_vsi->eth_stats.rx_multicast +
2070                         pf->main_vsi->eth_stats.rx_broadcast -
2071                         pf->main_vsi->eth_stats.rx_discards;
2072         stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
2073                         pf->main_vsi->eth_stats.tx_multicast +
2074                         pf->main_vsi->eth_stats.tx_broadcast;
2075         stats->ibytes   = ns->eth.rx_bytes;
2076         stats->obytes   = ns->eth.tx_bytes;
2077         stats->oerrors  = ns->eth.tx_errors +
2078                         pf->main_vsi->eth_stats.tx_errors;
2079         stats->imcasts  = pf->main_vsi->eth_stats.rx_multicast;
2080
2081         /* Rx Errors */
2082         stats->imissed  = ns->eth.rx_discards +
2083                         pf->main_vsi->eth_stats.rx_discards;
2084         stats->ierrors  = ns->crc_errors +
2085                         ns->rx_length_errors + ns->rx_undersize +
2086                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber +
2087                         stats->imissed;
2088
2089         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2090         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
2091         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
2092         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
2093         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
2094         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
2095         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2096                     ns->eth.rx_unknown_protocol);
2097         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
2098         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
2099         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
2100         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
2101         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
2102         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
2103
2104         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
2105                     ns->tx_dropped_link_down);
2106         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
2107         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
2108                     ns->illegal_bytes);
2109         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
2110         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
2111                     ns->mac_local_faults);
2112         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
2113                     ns->mac_remote_faults);
2114         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
2115                     ns->rx_length_errors);
2116         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
2117         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
2118         for (i = 0; i < 8; i++) {
2119                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
2120                                 i, ns->priority_xon_rx[i]);
2121                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
2122                                 i, ns->priority_xoff_rx[i]);
2123         }
2124         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
2125         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
2126         for (i = 0; i < 8; i++) {
2127                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
2128                                 i, ns->priority_xon_tx[i]);
2129                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
2130                                 i, ns->priority_xoff_tx[i]);
2131                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
2132                                 i, ns->priority_xon_2_xoff[i]);
2133         }
2134         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
2135         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
2136         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
2137         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
2138         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
2139         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
2140         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
2141         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
2142         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
2143         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
2144         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
2145         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
2146         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
2147         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
2148         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
2149         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
2150         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
2151         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
2152         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
2153                         ns->mac_short_packet_dropped);
2154         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
2155                     ns->checksum_error);
2156         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
2157         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
2158 }
2159
2160 /* Reset the statistics */
2161 static void
2162 i40e_dev_stats_reset(struct rte_eth_dev *dev)
2163 {
2164         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2165         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2166
2167         /* Mark PF and VSI stats to update the offset, aka "reset" */
2168         pf->offset_loaded = false;
2169         if (pf->main_vsi)
2170                 pf->main_vsi->offset_loaded = false;
2171
2172         /* Read the stats again so the current register values become the new offsets */
2173         i40e_read_stats_registers(pf, hw);
2174 }
2175
2176 static uint32_t
2177 i40e_xstats_calc_num(void)
2178 {
2179         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
2180                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
2181                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
2182 }
2183
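/*
 * Worked example for the helper above (counts assumed for illustration):
 * with I40E_NB_ETH_XSTATS = 5, I40E_NB_HW_PORT_XSTATS = 30 and one RXQ and
 * one TXQ per-priority statistic, the total is
 * 5 + 30 + (1 * 8) + (1 * 8) = 51 entries, because each per-priority
 * counter is expanded across the 8 traffic classes.
 */
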
2184 static int
2185 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
2186                     unsigned n)
2187 {
2188         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2189         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2190         unsigned i, count, prio;
2191         struct i40e_hw_port_stats *hw_stats = &pf->stats;
2192
2193         count = i40e_xstats_calc_num();
2194         if (n < count)
2195                 return count;
2196
2197         i40e_read_stats_registers(pf, hw);
2198
2199         if (xstats == NULL)
2200                 return 0;
2201
2202         count = 0;
2203
2204         /* Get stats from i40e_eth_stats struct */
2205         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2206                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2207                          "%s", rte_i40e_stats_strings[i].name);
2208                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
2209                         rte_i40e_stats_strings[i].offset);
2210                 count++;
2211         }
2212
2213         /* Get individual stats from the i40e_hw_port struct */
2214         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2215                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2216                          "%s", rte_i40e_hw_port_strings[i].name);
2217                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2218                                 rte_i40e_hw_port_strings[i].offset);
2219                 count++;
2220         }
2221
2222         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2223                 for (prio = 0; prio < 8; prio++) {
2224                         snprintf(xstats[count].name,
2225                                  sizeof(xstats[count].name),
2226                                  "rx_priority%u_%s", prio,
2227                                  rte_i40e_rxq_prio_strings[i].name);
2228                         xstats[count].value =
2229                                 *(uint64_t *)(((char *)hw_stats) +
2230                                 rte_i40e_rxq_prio_strings[i].offset +
2231                                 (sizeof(uint64_t) * prio));
2232                         count++;
2233                 }
2234         }
2235
2236         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2237                 for (prio = 0; prio < 8; prio++) {
2238                         snprintf(xstats[count].name,
2239                                  sizeof(xstats[count].name),
2240                                  "tx_priority%u_%s", prio,
2241                                  rte_i40e_txq_prio_strings[i].name);
2242                         xstats[count].value =
2243                                 *(uint64_t *)(((char *)hw_stats) +
2244                                 rte_i40e_txq_prio_strings[i].offset +
2245                                 (sizeof(uint64_t) * prio));
2246                         count++;
2247                 }
2248         }
2249
2250         return count;
2251 }
2252
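/*
 * Usage sketch for the xstats callback above (illustrative): callers size
 * the array first, since a too-small n makes the function return the
 * required count:
 *
 *     int n = rte_eth_xstats_get(port_id, NULL, 0);
 *     struct rte_eth_xstats *xs = malloc(n * sizeof(*xs));
 *     n = rte_eth_xstats_get(port_id, xs, n);
 */
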
2253 static int
2254 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
2255                                  __rte_unused uint16_t queue_id,
2256                                  __rte_unused uint8_t stat_idx,
2257                                  __rte_unused uint8_t is_rx)
2258 {
2259         PMD_INIT_FUNC_TRACE();
2260
2261         return -ENOSYS;
2262 }
2263
2264 static void
2265 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2266 {
2267         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2268         struct i40e_vsi *vsi = pf->main_vsi;
2269
2270         dev_info->max_rx_queues = vsi->nb_qps;
2271         dev_info->max_tx_queues = vsi->nb_qps;
2272         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
2273         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
2274         dev_info->max_mac_addrs = vsi->max_macaddrs;
2275         dev_info->max_vfs = dev->pci_dev->max_vfs;
2276         dev_info->rx_offload_capa =
2277                 DEV_RX_OFFLOAD_VLAN_STRIP |
2278                 DEV_RX_OFFLOAD_QINQ_STRIP |
2279                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2280                 DEV_RX_OFFLOAD_UDP_CKSUM |
2281                 DEV_RX_OFFLOAD_TCP_CKSUM;
2282         dev_info->tx_offload_capa =
2283                 DEV_TX_OFFLOAD_VLAN_INSERT |
2284                 DEV_TX_OFFLOAD_QINQ_INSERT |
2285                 DEV_TX_OFFLOAD_IPV4_CKSUM |
2286                 DEV_TX_OFFLOAD_UDP_CKSUM |
2287                 DEV_TX_OFFLOAD_TCP_CKSUM |
2288                 DEV_TX_OFFLOAD_SCTP_CKSUM |
2289                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2290                 DEV_TX_OFFLOAD_TCP_TSO;
2291         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
2292                                                 sizeof(uint32_t);
2293         dev_info->reta_size = pf->hash_lut_size;
2294         dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
2295
2296         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2297                 .rx_thresh = {
2298                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
2299                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
2300                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
2301                 },
2302                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
2303                 .rx_drop_en = 0,
2304         };
2305
2306         dev_info->default_txconf = (struct rte_eth_txconf) {
2307                 .tx_thresh = {
2308                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
2309                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
2310                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
2311                 },
2312                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
2313                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
2314                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2315                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2316         };
2317
2318         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2319                 .nb_max = I40E_MAX_RING_DESC,
2320                 .nb_min = I40E_MIN_RING_DESC,
2321                 .nb_align = I40E_ALIGN_RING_DESC,
2322         };
2323
2324         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2325                 .nb_max = I40E_MAX_RING_DESC,
2326                 .nb_min = I40E_MIN_RING_DESC,
2327                 .nb_align = I40E_ALIGN_RING_DESC,
2328         };
2329
2330         if (pf->flags & I40E_FLAG_VMDQ) {
2331                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
2332                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
2333                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
2334                                                 pf->max_nb_vmdq_vsi;
2335                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
2336                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
2337                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
2338         }
2339 }
2340
2341 static int
2342 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2343 {
2344         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2345         struct i40e_vsi *vsi = pf->main_vsi;
2346         PMD_INIT_FUNC_TRACE();
2347
2348         if (on)
2349                 return i40e_vsi_add_vlan(vsi, vlan_id);
2350         else
2351                 return i40e_vsi_delete_vlan(vsi, vlan_id);
2352 }
2353
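/*
 * Usage sketch for the VLAN filter callback above (illustrative): it is
 * reached through the generic ethdev API, e.g.
 *
 *     rte_eth_dev_vlan_filter(port_id, 1000, 1);    add VLAN ID 1000
 *     rte_eth_dev_vlan_filter(port_id, 1000, 0);    remove it again
 *
 * which maps to i40e_vsi_add_vlan()/i40e_vsi_delete_vlan() on the main VSI.
 */
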
2354 static int
2355 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
2356                    enum rte_vlan_type vlan_type,
2357                    uint16_t tpid)
2358 {
2359         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2360         uint64_t reg_r = 0, reg_w = 0;
2361         uint16_t reg_id = 0;
2362         int ret = 0;
2363
2364         switch (vlan_type) {
2365         case ETH_VLAN_TYPE_OUTER:
2366                 reg_id = 2;
2367                 break;
2368         case ETH_VLAN_TYPE_INNER:
2369                 reg_id = 3;
2370                 break;
2371         default:
2372                 ret = -EINVAL;
2373                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
2374                 return ret;
2375         }
2376         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2377                                           &reg_r, NULL);
2378         if (ret != I40E_SUCCESS) {
2379                 PMD_DRV_LOG(ERR, "Failed to debug-read "
2380                             "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
2381                 ret = -EIO;
2382                 return ret;
2383         }
2384         PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: "
2385                     "0x%08"PRIx64"", reg_id, reg_r);
2386
2387         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
2388         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
2389         if (reg_r == reg_w) {
2390                 ret = 0;
2391                 PMD_DRV_LOG(DEBUG, "Register value unchanged, no write needed");
2392                 return ret;
2393         }
2394
2395         ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2396                                            reg_w, NULL);
2397         if (ret != I40E_SUCCESS) {
2398                 ret = -EIO;
2399                 PMD_DRV_LOG(ERR, "Failed to debug-write "
2400                             "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
2401                 return ret;
2402         }
2403         PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
2404                     "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
2405
2406         return ret;
2407 }
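
/*
 * Usage sketch (illustrative, not part of the driver): the TPID update
 * above is a read-modify-write of GL_SWT_L2TAGCTRL, index 2 for the
 * outer tag and index 3 for the inner tag. From the application side
 * it is driven by the generic API, e.g. to accept 802.1ad S-tags:
 *
 *     rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER,
 *                                     0x88A8);
 */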
2408
2409 static void
2410 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2411 {
2412         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2413         struct i40e_vsi *vsi = pf->main_vsi;
2414
2415         if (mask & ETH_VLAN_FILTER_MASK) {
2416                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2417                         i40e_vsi_config_vlan_filter(vsi, TRUE);
2418                 else
2419                         i40e_vsi_config_vlan_filter(vsi, FALSE);
2420         }
2421
2422         if (mask & ETH_VLAN_STRIP_MASK) {
2423                 /* Enable or disable VLAN stripping */
2424                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2425                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
2426                 else
2427                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
2428         }
2429
2430         if (mask & ETH_VLAN_EXTEND_MASK) {
2431                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2432                         i40e_vsi_config_double_vlan(vsi, TRUE);
2433                 else
2434                         i40e_vsi_config_double_vlan(vsi, FALSE);
2435         }
2436 }
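
/*
 * Usage sketch (illustrative, not part of the driver): the mask handed
 * to this callback marks which offloads changed; the desired on/off
 * state is taken from dev_conf.rxmode. An application toggles the
 * offloads through the generic API, e.g.:
 *
 *     int cur = rte_eth_dev_get_vlan_offload(port_id);
 *
 *     cur |= ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;
 *     rte_eth_dev_set_vlan_offload(port_id, cur);
 */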
2437
2438 static void
2439 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
2440                           __rte_unused uint16_t queue,
2441                           __rte_unused int on)
2442 {
2443         PMD_INIT_FUNC_TRACE();
2444 }
2445
2446 static int
2447 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
2448 {
2449         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2450         struct i40e_vsi *vsi = pf->main_vsi;
2451         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
2452         struct i40e_vsi_vlan_pvid_info info;
2453
2454         memset(&info, 0, sizeof(info));
2455         info.on = on;
2456         if (info.on)
2457                 info.config.pvid = pvid;
2458         else {
2459                 info.config.reject.tagged =
2460                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
2461                 info.config.reject.untagged =
2462                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
2463         }
2464
2465         return i40e_vsi_vlan_pvid_set(vsi, &info);
2466 }
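
/*
 * Usage sketch (illustrative, not part of the driver): enabling a port
 * VLAN inserts the PVID into all transmitted packets; disabling it
 * falls back to the reject flags taken from dev_conf.txmode above.
 *
 *     rte_eth_dev_set_vlan_pvid(port_id, 42, 1);   tag everything with 42
 *     rte_eth_dev_set_vlan_pvid(port_id, 0, 0);    turn PVID off again
 */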
2467
2468 static int
2469 i40e_dev_led_on(struct rte_eth_dev *dev)
2470 {
2471         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2472         uint32_t mode = i40e_led_get(hw);
2473
2474         if (mode == 0)
2475                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
2476
2477         return 0;
2478 }
2479
2480 static int
2481 i40e_dev_led_off(struct rte_eth_dev *dev)
2482 {
2483         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2484         uint32_t mode = i40e_led_get(hw);
2485
2486         if (mode != 0)
2487                 i40e_led_set(hw, 0, false);
2488
2489         return 0;
2490 }
2491
2492 static int
2493 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2494 {
2495         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2496         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2497
2498         fc_conf->pause_time = pf->fc_conf.pause_time;
2499         fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
2500         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
2501
2502         /* Return the current mode according to the actual setting */
2503         switch (hw->fc.current_mode) {
2504         case I40E_FC_FULL:
2505                 fc_conf->mode = RTE_FC_FULL;
2506                 break;
2507         case I40E_FC_TX_PAUSE:
2508                 fc_conf->mode = RTE_FC_TX_PAUSE;
2509                 break;
2510         case I40E_FC_RX_PAUSE:
2511                 fc_conf->mode = RTE_FC_RX_PAUSE;
2512                 break;
2513         case I40E_FC_NONE:
2514         default:
2515                 fc_conf->mode = RTE_FC_NONE;
2516         }
2517
2518         return 0;
2519 }
2520
2521 static int
2522 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2523 {
2524         uint32_t mflcn_reg, fctrl_reg, reg;
2525         uint32_t max_high_water;
2526         uint8_t i, aq_failure;
2527         int err;
2528         struct i40e_hw *hw;
2529         struct i40e_pf *pf;
2530         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
2531                 [RTE_FC_NONE] = I40E_FC_NONE,
2532                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
2533                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
2534                 [RTE_FC_FULL] = I40E_FC_FULL
2535         };
2536
2537         /* The high_water field of rte_eth_fc_conf is in kilobyte units */
2538
2539         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
2540         if ((fc_conf->high_water > max_high_water) ||
2541                         (fc_conf->high_water < fc_conf->low_water)) {
2542                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB; "
2543                         "high_water must be <= %d.", max_high_water);
2544                 return -EINVAL;
2545         }
2546
2547         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2548         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2549         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
2550
2551         pf->fc_conf.pause_time = fc_conf->pause_time;
2552         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
2553         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
2554
2555         PMD_INIT_FUNC_TRACE();
2556
2557         /* All the link flow control related enable/disable register
2558          * configuration is handled by the F/W
2559          */
2560         err = i40e_set_fc(hw, &aq_failure, true);
2561         if (err < 0)
2562                 return -ENOSYS;
2563
2564         if (i40e_is_40G_device(hw->device_id)) {
2565                 /* Configure flow control refresh threshold,
2566                  * the value for stat_tx_pause_refresh_timer[8]
2567                  * is used for global pause operation.
2568                  */
2569
2570                 I40E_WRITE_REG(hw,
2571                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
2572                                pf->fc_conf.pause_time);
2573
2574                 /* configure the timer value included in transmitted pause
2575                  * frame,
2576                  * the value for stat_tx_pause_quanta[8] is used for global
2577                  * pause operation
2578                  */
2579                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
2580                                pf->fc_conf.pause_time);
2581
2582                 fctrl_reg = I40E_READ_REG(hw,
2583                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
2584
2585                 if (fc_conf->mac_ctrl_frame_fwd != 0)
2586                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
2587                 else
2588                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
2589
2590                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
2591                                fctrl_reg);
2592         } else {
2593                 /* Configure pause time (2 TCs per register) */
2594                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
2595                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
2596                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
2597
2598                 /* Configure flow control refresh threshold value */
2599                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
2600                                pf->fc_conf.pause_time / 2);
2601
2602                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
2603
2604                 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
2605                  * depending on the configuration
2606                  */
2607                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
2608                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
2609                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
2610                 } else {
2611                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
2612                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
2613                 }
2614
2615                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
2616         }
2617
2618         /* Configure the watermarks based on both packets and bytes */
2619         I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
2620                        (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
2621                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
2622         I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
2623                        (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
2624                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
2625         I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
2626                        pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
2627                        << I40E_KILOSHIFT);
2628         I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
2629                        pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
2630                        << I40E_KILOSHIFT);
2631
2632         I40E_WRITE_FLUSH(hw);
2633
2634         return 0;
2635 }
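
/*
 * Usage sketch (illustrative, not part of the driver): the watermarks
 * are expressed in kilobytes and checked against the 968 KB receive
 * packet buffer above; all values here are placeholders.
 *
 *     struct rte_eth_fc_conf fc = {
 *             .mode = RTE_FC_FULL,
 *             .high_water = 0x80,
 *             .low_water = 0x40,
 *             .pause_time = 0xFFFF,
 *             .mac_ctrl_frame_fwd = 0,
 *     };
 *
 *     if (rte_eth_dev_flow_ctrl_set(port_id, &fc) != 0)
 *             rte_exit(EXIT_FAILURE, "cannot set flow control\n");
 */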
2636
2637 static int
2638 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
2639                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
2640 {
2641         PMD_INIT_FUNC_TRACE();
2642
2643         return -ENOSYS;
2644 }
2645
2646 /* Add a MAC address, and update filters */
2647 static void
2648 i40e_macaddr_add(struct rte_eth_dev *dev,
2649                  struct ether_addr *mac_addr,
2650                  __rte_unused uint32_t index,
2651                  uint32_t pool)
2652 {
2653         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2654         struct i40e_mac_filter_info mac_filter;
2655         struct i40e_vsi *vsi;
2656         int ret;
2657
2658         /* If VMDQ not enabled or configured, return */
2659         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) {
2660                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
2661                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
2662                         pool);
2663                 return;
2664         }
2665
2666         if (pool > pf->nb_cfg_vmdq_vsi) {
2667                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
2668                                 pool, pf->nb_cfg_vmdq_vsi);
2669                 return;
2670         }
2671
2672         (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
2673         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2674                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2675         else
2676                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
2677
2678         if (pool == 0)
2679                 vsi = pf->main_vsi;
2680         else
2681                 vsi = pf->vmdq[pool - 1].vsi;
2682
2683         ret = i40e_vsi_add_mac(vsi, &mac_filter);
2684         if (ret != I40E_SUCCESS) {
2685                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
2686                 return;
2687         }
2688 }
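
/*
 * Usage sketch (illustrative, not part of the driver): pool 0 targets
 * the main VSI while pool N (N >= 1) targets VMDq VSI N - 1, so VMDq
 * must be enabled and configured before a nonzero pool can be used.
 *
 *     struct ether_addr addr = { .addr_bytes =
 *             { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *
 *     rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 */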
2689
2690 /* Remove a MAC address, and update filters */
2691 static void
2692 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
2693 {
2694         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2695         struct i40e_vsi *vsi;
2696         struct rte_eth_dev_data *data = dev->data;
2697         struct ether_addr *macaddr;
2698         int ret;
2699         uint32_t i;
2700         uint64_t pool_sel;
2701
2702         macaddr = &(data->mac_addrs[index]);
2703
2704         pool_sel = dev->data->mac_pool_sel[index];
2705
2706         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
2707                 if (pool_sel & (1ULL << i)) {
2708                         if (i == 0)
2709                                 vsi = pf->main_vsi;
2710                         else {
2711                                 /* No VMDQ pool enabled or configured */
2712                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
2713                                         (i > pf->nb_cfg_vmdq_vsi)) {
2714                                         PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
2715                                                         "/configured");
2716                                         return;
2717                                 }
2718                                 vsi = pf->vmdq[i - 1].vsi;
2719                         }
2720                         ret = i40e_vsi_delete_mac(vsi, macaddr);
2721
2722                         if (ret) {
2723                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
2724                                 return;
2725                         }
2726                 }
2727         }
2728 }
2729
2730 /* Set perfect match or hash match of MAC and VLAN for a VF */
2731 static int
2732 i40e_vf_mac_filter_set(struct i40e_pf *pf,
2733                  struct rte_eth_mac_filter *filter,
2734                  bool add)
2735 {
2736         struct i40e_hw *hw;
2737         struct i40e_mac_filter_info mac_filter;
2738         struct ether_addr old_mac;
2739         struct ether_addr *new_mac;
2740         struct i40e_pf_vf *vf = NULL;
2741         uint16_t vf_id;
2742         int ret;
2743
2744         if (pf == NULL) {
2745                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
2746                 return -EINVAL;
2747         }
2748         hw = I40E_PF_TO_HW(pf);
2749
2750         if (filter == NULL) {
2751                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
2752                 return -EINVAL;
2753         }
2754
2755         new_mac = &filter->mac_addr;
2756
2757         if (is_zero_ether_addr(new_mac)) {
2758                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
2759                 return -EINVAL;
2760         }
2761
2762         vf_id = filter->dst_id;
2763
2764         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
2765                 PMD_DRV_LOG(ERR, "Invalid argument.");
2766                 return -EINVAL;
2767         }
2768         vf = &pf->vfs[vf_id];
2769
2770         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
2771                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
2772                 return -EINVAL;
2773         }
2774
2775         if (add) {
2776                 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
2777                 (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
2778                                 ETHER_ADDR_LEN);
2779                 (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
2780                                  ETHER_ADDR_LEN);
2781
2782                 mac_filter.filter_type = filter->filter_type;
2783                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
2784                 if (ret != I40E_SUCCESS) {
2785                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
2786                         return -1;
2787                 }
2788                 ether_addr_copy(new_mac, &pf->dev_addr);
2789         } else {
2790                 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
2791                                 ETHER_ADDR_LEN);
2792                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
2793                 if (ret != I40E_SUCCESS) {
2794                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
2795                         return -1;
2796                 }
2797
2798                 /* Clear device address as it has been removed */
2799                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
2800                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
2801         }
2802
2803         return 0;
2804 }
2805
2806 /* MAC filter handle */
2807 static int
2808 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
2809                 void *arg)
2810 {
2811         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2812         struct rte_eth_mac_filter *filter;
2813         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2814         int ret = I40E_NOT_SUPPORTED;
2815
2816         filter = (struct rte_eth_mac_filter *)(arg);
2817
2818         switch (filter_op) {
2819         case RTE_ETH_FILTER_NOP:
2820                 ret = I40E_SUCCESS;
2821                 break;
2822         case RTE_ETH_FILTER_ADD:
2823                 i40e_pf_disable_irq0(hw);
2824                 if (filter->is_vf)
2825                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
2826                 i40e_pf_enable_irq0(hw);
2827                 break;
2828         case RTE_ETH_FILTER_DELETE:
2829                 i40e_pf_disable_irq0(hw);
2830                 if (filter->is_vf)
2831                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
2832                 i40e_pf_enable_irq0(hw);
2833                 break;
2834         default:
2835                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
2836                 ret = I40E_ERR_PARAM;
2837                 break;
2838         }
2839
2840         return ret;
2841 }
2842
2843 static int
2844 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2845 {
2846         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2847         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2848         int ret;
2849
2850         if (!lut)
2851                 return -EINVAL;
2852
2853         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2854                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
2855                                           lut, lut_size);
2856                 if (ret) {
2857                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2858                         return ret;
2859                 }
2860         } else {
2861                 uint32_t *lut_dw = (uint32_t *)lut;
2862                 uint16_t i, lut_size_dw = lut_size / 4;
2863
2864                 for (i = 0; i < lut_size_dw; i++)
2865                         lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
2866         }
2867
2868         return 0;
2869 }
2870
2871 static int
2872 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2873 {
2874         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2875         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2876         int ret;
2877
2878         if (!vsi || !lut)
2879                 return -EINVAL;
2880
2881         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2882                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
2883                                           lut, lut_size);
2884                 if (ret) {
2885                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2886                         return ret;
2887                 }
2888         } else {
2889                 uint32_t *lut_dw = (uint32_t *)lut;
2890                 uint16_t i, lut_size_dw = lut_size / 4;
2891
2892                 for (i = 0; i < lut_size_dw; i++)
2893                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
2894                 I40E_WRITE_FLUSH(hw);
2895         }
2896
2897         return 0;
2898 }
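
/*
 * Note (illustrative): on parts without AQ RSS support, the 8-bit LUT
 * entries are packed four per 32-bit PFQF_HLUT register, which is why
 * the register paths above walk lut_size / 4 dwords; a 128-entry LUT
 * occupies PFQF_HLUT(0)..PFQF_HLUT(31).
 */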
2899
2900 static int
2901 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
2902                          struct rte_eth_rss_reta_entry64 *reta_conf,
2903                          uint16_t reta_size)
2904 {
2905         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2906         uint16_t i, lut_size = pf->hash_lut_size;
2907         uint16_t idx, shift;
2908         uint8_t *lut;
2909         int ret;
2910
2911         if (reta_size != lut_size ||
2912                 reta_size > ETH_RSS_RETA_SIZE_512) {
2913                 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2914                         "(%d) doesn't match the number the hardware can "
2915                                         "support (%d)\n", reta_size, lut_size);
2916                 return -EINVAL;
2917         }
2918
2919         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2920         if (!lut) {
2921                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2922                 return -ENOMEM;
2923         }
2924         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
2925         if (ret)
2926                 goto out;
2927         for (i = 0; i < reta_size; i++) {
2928                 idx = i / RTE_RETA_GROUP_SIZE;
2929                 shift = i % RTE_RETA_GROUP_SIZE;
2930                 if (reta_conf[idx].mask & (1ULL << shift))
2931                         lut[i] = reta_conf[idx].reta[shift];
2932         }
2933         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
2934
2935 out:
2936         rte_free(lut);
2937
2938         return ret;
2939 }
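
/*
 * Usage sketch (illustrative, not part of the driver): spreading a
 * 128-entry redirection table over two queues. RTE_RETA_GROUP_SIZE is
 * 64, so two rte_eth_rss_reta_entry64 groups cover the table.
 *
 *     struct rte_eth_rss_reta_entry64 conf[2];
 *     uint16_t i;
 *
 *     memset(conf, 0, sizeof(conf));
 *     for (i = 0; i < 128; i++) {
 *             conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                     i % 2;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, conf, 128);
 */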
2940
2941 static int
2942 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
2943                         struct rte_eth_rss_reta_entry64 *reta_conf,
2944                         uint16_t reta_size)
2945 {
2946         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2947         uint16_t i, lut_size = pf->hash_lut_size;
2948         uint16_t idx, shift;
2949         uint8_t *lut;
2950         int ret;
2951
2952         if (reta_size != lut_size ||
2953                 reta_size > ETH_RSS_RETA_SIZE_512) {
2954                 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2955                         "(%d) doesn't match the number the hardware can "
2956                                         "support (%d)\n", reta_size, lut_size);
2957                 return -EINVAL;
2958         }
2959
2960         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2961         if (!lut) {
2962                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2963                 return -ENOMEM;
2964         }
2965
2966         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
2967         if (ret)
2968                 goto out;
2969         for (i = 0; i < reta_size; i++) {
2970                 idx = i / RTE_RETA_GROUP_SIZE;
2971                 shift = i % RTE_RETA_GROUP_SIZE;
2972                 if (reta_conf[idx].mask & (1ULL << shift))
2973                         reta_conf[idx].reta[shift] = lut[i];
2974         }
2975
2976 out:
2977         rte_free(lut);
2978
2979         return ret;
2980 }
2981
2982 /**
2983  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
2984  * @hw:   pointer to the HW structure
2985  * @mem:  pointer to mem struct to fill out
2986  * @size: size of memory requested
2987  * @alignment: what to align the allocation to
2988  **/
2989 enum i40e_status_code
2990 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2991                         struct i40e_dma_mem *mem,
2992                         u64 size,
2993                         u32 alignment)
2994 {
2995         const struct rte_memzone *mz = NULL;
2996         char z_name[RTE_MEMZONE_NAMESIZE];
2997
2998         if (!mem)
2999                 return I40E_ERR_PARAM;
3000
3001         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
3002         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
3003                                          alignment, RTE_PGSIZE_2M);
3004         if (!mz)
3005                 return I40E_ERR_NO_MEMORY;
3006
3007         mem->size = size;
3008         mem->va = mz->addr;
3009         mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
3010         mem->zone = (const void *)mz;
3011         PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
3012                     "%"PRIu64, mz->name, mem->pa);
3013
3014         return I40E_SUCCESS;
3015 }
3016
3017 /**
3018  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
3019  * @hw:   pointer to the HW structure
3020  * @mem:  ptr to mem struct to free
3021  **/
3022 enum i40e_status_code
3023 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3024                     struct i40e_dma_mem *mem)
3025 {
3026         if (!mem)
3027                 return I40E_ERR_PARAM;
3028
3029         PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
3030                     "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
3031                     mem->pa);
3032         rte_memzone_free((const struct rte_memzone *)mem->zone);
3033         mem->zone = NULL;
3034         mem->va = NULL;
3035         mem->pa = (u64)0;
3036
3037         return I40E_SUCCESS;
3038 }
3039
3040 /**
3041  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
3042  * @hw:   pointer to the HW structure
3043  * @mem:  pointer to mem struct to fill out
3044  * @size: size of memory requested
3045  **/
3046 enum i40e_status_code
3047 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3048                          struct i40e_virt_mem *mem,
3049                          u32 size)
3050 {
3051         if (!mem)
3052                 return I40E_ERR_PARAM;
3053
3054         mem->size = size;
3055         mem->va = rte_zmalloc("i40e", size, 0);
3056
3057         if (mem->va)
3058                 return I40E_SUCCESS;
3059         else
3060                 return I40E_ERR_NO_MEMORY;
3061 }
3062
3063 /**
3064  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
3065  * @hw:   pointer to the HW structure
3066  * @mem:  pointer to mem struct to free
3067  **/
3068 enum i40e_status_code
3069 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3070                      struct i40e_virt_mem *mem)
3071 {
3072         if (!mem)
3073                 return I40E_ERR_PARAM;
3074
3075         rte_free(mem->va);
3076         mem->va = NULL;
3077
3078         return I40E_SUCCESS;
3079 }
3080
3081 void
3082 i40e_init_spinlock_d(struct i40e_spinlock *sp)
3083 {
3084         rte_spinlock_init(&sp->spinlock);
3085 }
3086
3087 void
3088 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
3089 {
3090         rte_spinlock_lock(&sp->spinlock);
3091 }
3092
3093 void
3094 i40e_release_spinlock_d(struct i40e_spinlock *sp)
3095 {
3096         rte_spinlock_unlock(&sp->spinlock);
3097 }
3098
3099 void
3100 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
3101 {
3102         return;
3103 }
3104
3105 /**
3106  * Get the hardware capabilities, which will be parsed
3107  * and saved into struct i40e_hw.
3108  */
3109 static int
3110 i40e_get_cap(struct i40e_hw *hw)
3111 {
3112         struct i40e_aqc_list_capabilities_element_resp *buf;
3113         uint16_t len, size = 0;
3114         int ret;
3115
3116         /* Calculate a buffer large enough to hold the response data temporarily */
3117         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
3118                                                 I40E_MAX_CAP_ELE_NUM;
3119         buf = rte_zmalloc("i40e", len, 0);
3120         if (!buf) {
3121                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3122                 return I40E_ERR_NO_MEMORY;
3123         }
3124
3125         /* Get and parse the capabilities, then save them to hw */
3126         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
3127                         i40e_aqc_opc_list_func_capabilities, NULL);
3128         if (ret != I40E_SUCCESS)
3129                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
3130
3131         /* Free the temporary buffer after use */
3132         rte_free(buf);
3133
3134         return ret;
3135 }
3136
3137 static int
3138 i40e_pf_parameter_init(struct rte_eth_dev *dev)
3139 {
3140         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3141         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3142         uint16_t qp_count = 0, vsi_count = 0;
3143
3144         if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
3145                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
3146                 return -EINVAL;
3147         }
3148         /* Add the parameter init for LFC */
3149         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
3150         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
3151         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
3152
3153         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
3154         pf->max_num_vsi = hw->func_caps.num_vsis;
3155         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
3156         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
3157         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3158
3159         /* FDir queue/VSI allocation */
3160         pf->fdir_qp_offset = 0;
3161         if (hw->func_caps.fd) {
3162                 pf->flags |= I40E_FLAG_FDIR;
3163                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
3164         } else {
3165                 pf->fdir_nb_qps = 0;
3166         }
3167         qp_count += pf->fdir_nb_qps;
3168         vsi_count += 1;
3169
3170         /* LAN queue/VSI allocation */
3171         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
3172         if (!hw->func_caps.rss) {
3173                 pf->lan_nb_qps = 1;
3174         } else {
3175                 pf->flags |= I40E_FLAG_RSS;
3176                 if (hw->mac.type == I40E_MAC_X722)
3177                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
3178                 pf->lan_nb_qps = pf->lan_nb_qp_max;
3179         }
3180         qp_count += pf->lan_nb_qps;
3181         vsi_count += 1;
3182
3183         /* VF queue/VSI allocation */
3184         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
3185         if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
3186                 pf->flags |= I40E_FLAG_SRIOV;
3187                 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3188                 pf->vf_num = dev->pci_dev->max_vfs;
3189                 PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
3190                             "in total %u queues", pf->vf_num, pf->vf_nb_qps,
3191                             pf->vf_nb_qps * pf->vf_num);
3192         } else {
3193                 pf->vf_nb_qps = 0;
3194                 pf->vf_num = 0;
3195         }
3196         qp_count += pf->vf_nb_qps * pf->vf_num;
3197         vsi_count += pf->vf_num;
3198
3199         /* VMDq queue/VSI allocation */
3200         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
3201         pf->vmdq_nb_qps = 0;
3202         pf->max_nb_vmdq_vsi = 0;
3203         if (hw->func_caps.vmdq) {
3204                 if (qp_count < hw->func_caps.num_tx_qp &&
3205                         vsi_count < hw->func_caps.num_vsis) {
3206                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
3207                                 qp_count) / pf->vmdq_nb_qp_max;
3208
3209                         /* Limit the maximum number of VMDq vsi to the maximum
3210                          * ethdev can support
3211                          */
3212                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
3213                                 hw->func_caps.num_vsis - vsi_count);
3214                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
3215                                 ETH_64_POOLS);
3216                         if (pf->max_nb_vmdq_vsi) {
3217                                 pf->flags |= I40E_FLAG_VMDQ;
3218                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
3219                                 PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
3220                                             "per VMDQ VSI, in total %u queues",
3221                                             pf->max_nb_vmdq_vsi,
3222                                             pf->vmdq_nb_qps, pf->vmdq_nb_qps *
3223                                             pf->max_nb_vmdq_vsi);
3224                         } else {
3225                                 PMD_DRV_LOG(INFO, "Not enough queues left "
3226                                             "for VMDq");
3227                         }
3228                 } else {
3229                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
3230                 }
3231         }
3232         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
3233         vsi_count += pf->max_nb_vmdq_vsi;
3234
3235         if (hw->func_caps.dcb)
3236                 pf->flags |= I40E_FLAG_DCB;
3237
3238         if (qp_count > hw->func_caps.num_tx_qp) {
3239                 PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
3240                             "the hardware maximum %u", qp_count,
3241                             hw->func_caps.num_tx_qp);
3242                 return -EINVAL;
3243         }
3244         if (vsi_count > hw->func_caps.num_vsis) {
3245                 PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
3246                             "the hardware maximum %u", vsi_count,
3247                             hw->func_caps.num_vsis);
3248                 return -EINVAL;
3249         }
3250
3251         return 0;
3252 }
3253
3254 static int
3255 i40e_pf_get_switch_config(struct i40e_pf *pf)
3256 {
3257         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3258         struct i40e_aqc_get_switch_config_resp *switch_config;
3259         struct i40e_aqc_switch_config_element_resp *element;
3260         uint16_t start_seid = 0, num_reported;
3261         int ret;
3262
3263         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
3264                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
3265         if (!switch_config) {
3266                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3267                 return -ENOMEM;
3268         }
3269
3270         /* Get the switch configurations */
3271         ret = i40e_aq_get_switch_config(hw, switch_config,
3272                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
3273         if (ret != I40E_SUCCESS) {
3274                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
3275                 goto fail;
3276         }
3277         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
3278         if (num_reported != 1) { /* The number should be 1 */
3279                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
3280                 goto fail;
3281         }
3282
3283         /* Parse the switch configuration elements */
3284         element = &(switch_config->element[0]);
3285         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
3286                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
3287                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
3288         } else
3289                 PMD_DRV_LOG(INFO, "Unknown element type");
3290
3291 fail:
3292         rte_free(switch_config);
3293
3294         return ret;
3295 }
3296
3297 static int
3298 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
3299                         uint32_t num)
3300 {
3301         struct pool_entry *entry;
3302
3303         if (pool == NULL || num == 0)
3304                 return -EINVAL;
3305
3306         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
3307         if (entry == NULL) {
3308                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
3309                 return -ENOMEM;
3310         }
3311
3312         /* Initialize the queue heap */
3313         pool->num_free = num;
3314         pool->num_alloc = 0;
3315         pool->base = base;
3316         LIST_INIT(&pool->alloc_list);
3317         LIST_INIT(&pool->free_list);
3318
3319         /* Initialize the element */
3320         entry->base = 0;
3321         entry->len = num;
3322
3323         LIST_INSERT_HEAD(&pool->free_list, entry, next);
3324         return 0;
3325 }
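
/*
 * Life-cycle sketch (illustrative, not part of the driver): "base" is
 * the hardware offset of the managed range; pool entries store offsets
 * relative to it, and alloc returns an absolute base again.
 *
 *     struct i40e_res_pool_info qp_pool;
 *     int base;
 *
 *     i40e_res_pool_init(&qp_pool, 64, 128);      manage 64..191
 *     base = i40e_res_pool_alloc(&qp_pool, 16);   e.g. returns 64
 *     i40e_res_pool_free(&qp_pool, base);
 *     i40e_res_pool_destroy(&qp_pool);
 */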
3326
3327 static void
3328 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
3329 {
3330         struct pool_entry *entry;
3331
3332         if (pool == NULL)
3333                 return;
3334
3335         LIST_FOREACH(entry, &pool->alloc_list, next) {
3336                 LIST_REMOVE(entry, next);
3337                 rte_free(entry);
3338         }
3339
3340         LIST_FOREACH(entry, &pool->free_list, next) {
3341                 LIST_REMOVE(entry, next);
3342                 rte_free(entry);
3343         }
3344
3345         pool->num_free = 0;
3346         pool->num_alloc = 0;
3347         pool->base = 0;
3348         LIST_INIT(&pool->alloc_list);
3349         LIST_INIT(&pool->free_list);
3350 }
3351
3352 static int
3353 i40e_res_pool_free(struct i40e_res_pool_info *pool,
3354                        uint32_t base)
3355 {
3356         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
3357         uint32_t pool_offset;
3358         int insert;
3359
3360         if (pool == NULL) {
3361                 PMD_DRV_LOG(ERR, "Invalid parameter");
3362                 return -EINVAL;
3363         }
3364
3365         pool_offset = base - pool->base;
3366         /* Lookup in alloc list */
3367         LIST_FOREACH(entry, &pool->alloc_list, next) {
3368                 if (entry->base == pool_offset) {
3369                         valid_entry = entry;
3370                         LIST_REMOVE(entry, next);
3371                         break;
3372                 }
3373         }
3374
3375         /* Not found, return */
3376         if (valid_entry == NULL) {
3377                 PMD_DRV_LOG(ERR, "Failed to find entry");
3378                 return -EINVAL;
3379         }
3380
3381         /**
3382          * Found it; move it to the free list and try to merge.
3383          * To make merging easier, the free list is kept sorted by qbase.
3384          * Find the adjacent prev and next entries.
3385          */
3386         prev = next = NULL;
3387         LIST_FOREACH(entry, &pool->free_list, next) {
3388                 if (entry->base > valid_entry->base) {
3389                         next = entry;
3390                         break;
3391                 }
3392                 prev = entry;
3393         }
3394
3395         insert = 0;
3396         /* Try to merge with the next one */
3397         if (next != NULL) {
3398                 /* Merge with next one */
3399                 if (valid_entry->base + valid_entry->len == next->base) {
3400                         next->base = valid_entry->base;
3401                         next->len += valid_entry->len;
3402                         rte_free(valid_entry);
3403                         valid_entry = next;
3404                         insert = 1;
3405                 }
3406         }
3407
3408         if (prev != NULL) {
3409                 /* Merge with previous one */
3410                 if (prev->base + prev->len == valid_entry->base) {
3411                         prev->len += valid_entry->len;
3412                         /* If it was merged with the next one, remove the next node */
3413                         if (insert == 1) {
3414                                 LIST_REMOVE(valid_entry, next);
3415                                 rte_free(valid_entry);
3416                         } else {
3417                                 rte_free(valid_entry);
3418                                 insert = 1;
3419                         }
3420                 }
3421         }
3422
3423         /* No entry found to merge with, insert */
3424         if (insert == 0) {
3425                 if (prev != NULL)
3426                         LIST_INSERT_AFTER(prev, valid_entry, next);
3427                 else if (next != NULL)
3428                         LIST_INSERT_BEFORE(next, valid_entry, next);
3429                 else /* It's empty list, insert to head */
3430                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
3431         }
3432
3433         pool->num_free += valid_entry->len;
3434         pool->num_alloc -= valid_entry->len;
3435
3436         return 0;
3437 }
3438
3439 static int
3440 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
3441                        uint16_t num)
3442 {
3443         struct pool_entry *entry, *valid_entry;
3444
3445         if (pool == NULL || num == 0) {
3446                 PMD_DRV_LOG(ERR, "Invalid parameter");
3447                 return -EINVAL;
3448         }
3449
3450         if (pool->num_free < num) {
3451                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
3452                             num, pool->num_free);
3453                 return -ENOMEM;
3454         }
3455
3456         valid_entry = NULL;
3457         /* Look up in the free list and find the best-fit entry */
3458         LIST_FOREACH(entry, &pool->free_list, next) {
3459                 if (entry->len >= num) {
3460                         /* Find best one */
3461                         if (entry->len == num) {
3462                                 valid_entry = entry;
3463                                 break;
3464                         }
3465                         if (valid_entry == NULL || valid_entry->len > entry->len)
3466                                 valid_entry = entry;
3467                 }
3468         }
3469
3470         /* No entry found that satisfies the request, return */
3471         if (valid_entry == NULL) {
3472                 PMD_DRV_LOG(ERR, "No valid entry found");
3473                 return -ENOMEM;
3474         }
3475         /**
3476          * The entry has exactly as many queues as requested;
3477          * remove it from the free_list.
3478          */
3479         if (valid_entry->len == num) {
3480                 LIST_REMOVE(valid_entry, next);
3481         } else {
3482                 /**
3483                  * The entry has more queues than requested;
3484                  * create a new entry for the alloc_list, then advance
3485                  * the base and reduce the length of the free_list entry.
3486                  */
3487                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
3488                 if (entry == NULL) {
3489                         PMD_DRV_LOG(ERR, "Failed to allocate memory for "
3490                                     "resource pool");
3491                         return -ENOMEM;
3492                 }
3493                 entry->base = valid_entry->base;
3494                 entry->len = num;
3495                 valid_entry->base += num;
3496                 valid_entry->len -= num;
3497                 valid_entry = entry;
3498         }
3499
3500         /* Insert it into alloc list, not sorted */
3501         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
3502
3503         pool->num_free -= valid_entry->len;
3504         pool->num_alloc += valid_entry->len;
3505
3506         return valid_entry->base + pool->base;
3507 }
3508
3509 /**
3510  * bitmap_is_subset - Check whether src2 is subset of src1
3511  **/
3512 static inline int
3513 bitmap_is_subset(uint8_t src1, uint8_t src2)
3514 {
3515         return !((src1 ^ src2) & src2);
3516 }
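
/*
 * Worked example (illustrative): with src1 = 0x0F and src2 = 0x03,
 * (src1 ^ src2) = 0x0C and 0x0C & 0x03 = 0, so src2 is a subset and
 * the function returns 1; with src2 = 0x13, bit 4 survives the AND
 * and the function returns 0.
 */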
3517
3518 static enum i40e_status_code
3519 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
3520 {
3521         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3522
3523         /* If DCB is not supported, only default TC is supported */
3524         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
3525                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
3526                 return I40E_NOT_SUPPORTED;
3527         }
3528
3529         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
3530                 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
3531                             "HW support 0x%x", hw->func_caps.enabled_tcmap,
3532                             enabled_tcmap);
3533                 return I40E_NOT_SUPPORTED;
3534         }
3535         return I40E_SUCCESS;
3536 }
3537
3538 int
3539 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
3540                                 struct i40e_vsi_vlan_pvid_info *info)
3541 {
3542         struct i40e_hw *hw;
3543         struct i40e_vsi_context ctxt;
3544         uint8_t vlan_flags = 0;
3545         int ret;
3546
3547         if (vsi == NULL || info == NULL) {
3548                 PMD_DRV_LOG(ERR, "invalid parameters");
3549                 return I40E_ERR_PARAM;
3550         }
3551
3552         if (info->on) {
3553                 vsi->info.pvid = info->config.pvid;
3554                 /**
3555                  * If PVID insertion is enabled, only tagged packets are
3556                  * allowed to be sent out.
3557                  */
3558                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
3559                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
3560         } else {
3561                 vsi->info.pvid = 0;
3562                 if (info->config.reject.tagged == 0)
3563                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
3564
3565                 if (info->config.reject.untagged == 0)
3566                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
3567         }
3568         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
3569                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
3570         vsi->info.port_vlan_flags |= vlan_flags;
3571         vsi->info.valid_sections =
3572                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3573         memset(&ctxt, 0, sizeof(ctxt));
3574         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3575         ctxt.seid = vsi->seid;
3576
3577         hw = I40E_VSI_TO_HW(vsi);
3578         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3579         if (ret != I40E_SUCCESS)
3580                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
3581
3582         return ret;
3583 }
3584
3585 static int
3586 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
3587 {
3588         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3589         int i, ret;
3590         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
3591
3592         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
3593         if (ret != I40E_SUCCESS)
3594                 return ret;
3595
3596         if (!vsi->seid) {
3597                 PMD_DRV_LOG(ERR, "seid not valid");
3598                 return -EINVAL;
3599         }
3600
3601         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
3602         tc_bw_data.tc_valid_bits = enabled_tcmap;
3603         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3604                 tc_bw_data.tc_bw_credits[i] =
3605                         (enabled_tcmap & (1 << i)) ? 1 : 0;
3606
3607         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
3608         if (ret != I40E_SUCCESS) {
3609                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
3610                 return ret;
3611         }
3612
3613         (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
3614                                         sizeof(vsi->info.qs_handle));
3615         return I40E_SUCCESS;
3616 }
3617
3618 static enum i40e_status_code
3619 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
3620                                  struct i40e_aqc_vsi_properties_data *info,
3621                                  uint8_t enabled_tcmap)
3622 {
3623         enum i40e_status_code ret;
3624         int i, total_tc = 0;
3625         uint16_t qpnum_per_tc, bsf, qp_idx;
3626
3627         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
3628         if (ret != I40E_SUCCESS)
3629                 return ret;
3630
3631         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3632                 if (enabled_tcmap & (1 << i))
3633                         total_tc++;
3634         vsi->enabled_tc = enabled_tcmap;
3635
3636         /* Number of queues per enabled TC */
3637         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
3638         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
3639         bsf = rte_bsf32(qpnum_per_tc);
3640
3641         /* Adjust the queue number to actual queues that can be applied */
3642         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
3643                 vsi->nb_qps = qpnum_per_tc * total_tc;
3644
3645         /**
3646          * Configure TC and queue mapping parameters, for enabled TC,
3647          * allocate qpnum_per_tc queues to this traffic class. For a
3648          * disabled TC, the default queue will serve it.
3649          */
3650         qp_idx = 0;
3651         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3652                 if (vsi->enabled_tc & (1 << i)) {
3653                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
3654                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
3655                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
3656                         qp_idx += qpnum_per_tc;
3657                 } else
3658                         info->tc_mapping[i] = 0;
3659         }
3660
3661         /* Associate queue number with VSI */
3662         if (vsi->type == I40E_VSI_SRIOV) {
3663                 info->mapping_flags |=
3664                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
3665                 for (i = 0; i < vsi->nb_qps; i++)
3666                         info->queue_mapping[i] =
3667                                 rte_cpu_to_le_16(vsi->base_queue + i);
3668         } else {
3669                 info->mapping_flags |=
3670                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
3671                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
3672         }
3673         info->valid_sections |=
3674                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
3675
3676         return I40E_SUCCESS;
3677 }
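
/*
 * Worked example (illustrative): with 64 queues and four enabled TCs,
 * qpnum_per_tc = 16 and bsf = rte_bsf32(16) = 4, so each enabled TC
 * is mapped with qp_idx stepping 0, 16, 32, 48, and the queue count
 * is encoded as the power-of-two exponent 4 in the NUMBER field
 * rather than as a plain count.
 */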
3678
3679 static int
3680 i40e_veb_release(struct i40e_veb *veb)
3681 {
3682         struct i40e_vsi *vsi;
3683         struct i40e_hw *hw;
3684
3685         if (veb == NULL || veb->associate_vsi == NULL)
3686                 return -EINVAL;
3687
3688         if (!TAILQ_EMPTY(&veb->head)) {
3689                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
3690                 return -EACCES;
3691         }
3692
3693         vsi = veb->associate_vsi;
3694         hw = I40E_VSI_TO_HW(vsi);
3695
3696         vsi->uplink_seid = veb->uplink_seid;
3697         i40e_aq_delete_element(hw, veb->seid, NULL);
3698         rte_free(veb);
3699         vsi->veb = NULL;
3700         return I40E_SUCCESS;
3701 }
3702
3703 /* Setup a veb */
3704 static struct i40e_veb *
3705 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
3706 {
3707         struct i40e_veb *veb;
3708         int ret;
3709         struct i40e_hw *hw;
3710
3711         if (pf == NULL || vsi == NULL) {
3712                 PMD_DRV_LOG(ERR, "veb setup failed, "
3713                             "associated VSI shouldn't be NULL");
3714                 return NULL;
3715         }
3716         hw = I40E_PF_TO_HW(pf);
3717
3718         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
3719         if (!veb) {
3720                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
3721                 goto fail;
3722         }
3723
3724         veb->associate_vsi = vsi;
3725         TAILQ_INIT(&veb->head);
3726         veb->uplink_seid = vsi->uplink_seid;
3727
3728         ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
3729                 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
3730
3731         if (ret != I40E_SUCCESS) {
3732                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
3733                             hw->aq.asq_last_status);
3734                 goto fail;
3735         }
3736
3737         /* get statistics index */
3738         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
3739                                 &veb->stats_idx, NULL, NULL, NULL);
3740         if (ret != I40E_SUCCESS) {
3741                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
3742                             hw->aq.asq_last_status);
3743                 goto fail;
3744         }
3745
3746         /* Get VEB bandwidth, to be implemented */
3747         /* Now associated vsi binding to the VEB, set uplink to this VEB */
3748         vsi->uplink_seid = veb->seid;
3749
3750         return veb;
3751 fail:
3752         rte_free(veb);
3753         return NULL;
3754 }
3755
3756 int
3757 i40e_vsi_release(struct i40e_vsi *vsi)
3758 {
3759         struct i40e_pf *pf;
3760         struct i40e_hw *hw;
3761         struct i40e_vsi_list *vsi_list;
3762         int ret;
3763         struct i40e_mac_filter *f;
3764
3765         if (!vsi)
3766                 return I40E_SUCCESS;
3767
3768         pf = I40E_VSI_TO_PF(vsi);
3769         hw = I40E_VSI_TO_HW(vsi);
3770
3771         /* If the VSI has children attached, release the children first */
3772         if (vsi->veb) {
3773                 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
3774                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
3775                                 return -1;
3776                         TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
3777                 }
3778                 i40e_veb_release(vsi->veb);
3779         }
3780
3781         /* Remove all macvlan filters of the VSI */
3782         i40e_vsi_remove_all_macvlan_filter(vsi);
3783         TAILQ_FOREACH(f, &vsi->mac_list, next)
3784                 rte_free(f);
3785
3786         if (vsi->type != I40E_VSI_MAIN) {
3787                 /* Remove vsi from parent's sibling list */
3788                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
3789                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
3790                         return I40E_ERR_PARAM;
3791                 }
3792                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
3793                                 &vsi->sib_vsi_list, list);
3794
3795                 /* Remove all switch element of the VSI */
3796                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
3797                 if (ret != I40E_SUCCESS)
3798                         PMD_DRV_LOG(ERR, "Failed to delete element");
3799         }
3800         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
3801
3802         if (vsi->type != I40E_VSI_SRIOV)
3803                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
3804         rte_free(vsi);
3805
3806         return I40E_SUCCESS;
3807 }
3808
3809 static int
3810 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
3811 {
3812         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3813         struct i40e_aqc_remove_macvlan_element_data def_filter;
3814         struct i40e_mac_filter_info filter;
3815         int ret;
3816
3817         if (vsi->type != I40E_VSI_MAIN)
3818                 return I40E_ERR_CONFIG;
3819         memset(&def_filter, 0, sizeof(def_filter));
3820         (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
3821                                         ETH_ADDR_LEN);
3822         def_filter.vlan_tag = 0;
3823         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
3824                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
3825         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
3826         if (ret != I40E_SUCCESS) {
3827                 struct i40e_mac_filter *f;
3828                 struct ether_addr *mac;
3829
3830                 PMD_DRV_LOG(WARNING, "Cannot remove the default "
3831                             "macvlan filter");
3832                 /* The permanent MAC needs to be added to the MAC list */
3833                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
3834                 if (f == NULL) {
3835                         PMD_DRV_LOG(ERR, "failed to allocate memory");
3836                         return I40E_ERR_NO_MEMORY;
3837                 }
3838                 mac = &f->mac_info.mac_addr;
3839                 (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
3840                                 ETH_ADDR_LEN);
3841                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3842                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
3843                 vsi->mac_num++;
3844
3845                 return ret;
3846         }
3847         (void)rte_memcpy(&filter.mac_addr,
3848                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
3849         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3850         return i40e_vsi_add_mac(vsi, &filter);
3851 }
3852
3853 #define I40E_3_BIT_MASK     0x7
3854 /*
3855  * i40e_vsi_get_bw_config - Query VSI BW Information
3856  * @vsi: the VSI to be queried
3857  *
3858  * Returns 0 on success, negative value on failure
3859  */
3860 static enum i40e_status_code
3861 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
3862 {
3863         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
3864         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
3865         struct i40e_hw *hw = &vsi->adapter->hw;
3866         i40e_status ret;
3867         int i;
3868         uint32_t bw_max;
3869
3870         memset(&bw_config, 0, sizeof(bw_config));
3871         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3872         if (ret != I40E_SUCCESS) {
3873                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
3874                             hw->aq.asq_last_status);
3875                 return ret;
3876         }
3877
3878         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
3879         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
3880                                         &ets_sla_config, NULL);
3881         if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "VSI failed to get TC bandwidth "
                            "configuration %u", hw->aq.asq_last_status);
3884                 return ret;
3885         }
3886
3887         /* store and print out BW info */
3888         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
3889         vsi->bw_info.bw_max = bw_config.max_bw;
3890         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
3891         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
3892         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
3893                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
3894                      I40E_16_BIT_WIDTH);
3895         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3896                 vsi->bw_info.bw_ets_share_credits[i] =
3897                                 ets_sla_config.share_credits[i];
3898                 vsi->bw_info.bw_ets_credits[i] =
3899                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
3900                 /* 4 bits per TC, 4th bit is reserved */
3901                 vsi->bw_info.bw_ets_max[i] =
3902                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
3903                                   I40E_3_BIT_MASK);
3904                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
3905                             vsi->bw_info.bw_ets_share_credits[i]);
3906                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
3907                             vsi->bw_info.bw_ets_credits[i]);
3908                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
3909                             vsi->bw_info.bw_ets_max[i]);
3910         }
3911
3912         return I40E_SUCCESS;
3913 }
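
/*
 * Illustrative note (editor's example, not driver logic): the ETS
 * max-credit unpacking above packs eight 4-bit fields into the 32-bit
 * bw_max word. Assuming tc_bw_max[0] = 0x4321 and tc_bw_max[1] = 0x8765,
 * bw_max = 0x87654321, so TC0 reads (bw_max >> 0) & 0x7 = 1 and TC1
 * reads (bw_max >> 4) & 0x7 = 2; the 4th bit of each nibble is reserved
 * and masked off by I40E_3_BIT_MASK.
 */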
3914
3915 /* Setup a VSI */
3916 struct i40e_vsi *
3917 i40e_vsi_setup(struct i40e_pf *pf,
3918                enum i40e_vsi_type type,
3919                struct i40e_vsi *uplink_vsi,
3920                uint16_t user_param)
3921 {
3922         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3923         struct i40e_vsi *vsi;
3924         struct i40e_mac_filter_info filter;
3925         int ret;
3926         struct i40e_vsi_context ctxt;
3927         struct ether_addr broadcast =
3928                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
3929
3930         if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
                PMD_DRV_LOG(ERR, "VSI setup failed, "
                            "uplink VSI shouldn't be NULL");
3933                 return NULL;
3934         }
3935
3936         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
3937                 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
3938                             "uplink VSI should be NULL");
3939                 return NULL;
3940         }
3941
3942         /* If uplink vsi didn't setup VEB, create one first */
3943         if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
3944                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
3945
                if (uplink_vsi->veb == NULL) {
3947                         PMD_DRV_LOG(ERR, "VEB setup failed");
3948                         return NULL;
3949                 }
3950         }
3951
3952         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
3953         if (!vsi) {
3954                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
3955                 return NULL;
3956         }
3957         TAILQ_INIT(&vsi->mac_list);
3958         vsi->type = type;
3959         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
3960         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
3961         vsi->parent_vsi = uplink_vsi;
3962         vsi->user_param = user_param;
3963         /* Allocate queues */
3964         switch (vsi->type) {
        case I40E_VSI_MAIN:
                vsi->nb_qps = pf->lan_nb_qps;
                break;
        case I40E_VSI_SRIOV:
3969                 vsi->nb_qps = pf->vf_nb_qps;
3970                 break;
3971         case I40E_VSI_VMDQ2:
3972                 vsi->nb_qps = pf->vmdq_nb_qps;
3973                 break;
3974         case I40E_VSI_FDIR:
3975                 vsi->nb_qps = pf->fdir_nb_qps;
3976                 break;
3977         default:
3978                 goto fail_mem;
3979         }
        /*
         * The filter status descriptor is reported on rx queue 0,
         * while the tx queue for fdir filter programming has no
         * such constraint and can be any non-zero queue.
         * To simplify things, the FDIR vsi uses queue pair 0.
         * To guarantee that queue pair 0 is available, its queue
         * allocation must be done before this function is called.
         */
        if (type != I40E_VSI_FDIR) {
                ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
                                    vsi->seid, ret);
                        goto fail_mem;
                }
                vsi->base_queue = ret;
        } else {
                vsi->base_queue = I40E_FDIR_QUEUE_ID;
        }
3998
3999         /* VF has MSIX interrupt in VF range, don't allocate here */
4000         if (type == I40E_VSI_MAIN) {
4001                 ret = i40e_res_pool_alloc(&pf->msix_pool,
4002                                           RTE_MIN(vsi->nb_qps,
4003                                                   RTE_MAX_RXTX_INTR_VEC_ID));
4004                 if (ret < 0) {
4005                         PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
4006                                     vsi->seid, ret);
4007                         goto fail_queue_alloc;
4008                 }
4009                 vsi->msix_intr = ret;
4010                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
4011         } else if (type != I40E_VSI_SRIOV) {
4012                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
4013                 if (ret < 0) {
4014                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
4015                         goto fail_queue_alloc;
4016                 }
4017                 vsi->msix_intr = ret;
4018                 vsi->nb_msix = 1;
4019         } else {
4020                 vsi->msix_intr = 0;
4021                 vsi->nb_msix = 0;
4022         }
4023
4024         /* Add VSI */
4025         if (type == I40E_VSI_MAIN) {
4026                 /* For main VSI, no need to add since it's default one */
4027                 vsi->uplink_seid = pf->mac_seid;
4028                 vsi->seid = pf->main_vsi_seid;
4029                 /* Bind queues with specific MSIX interrupt */
                /**
                 * At least two interrupts are needed: one for the misc
                 * cause, enabled from the OS side, and another for binding
                 * the queue interrupts from the device side only.
                 */
4035
4036                 /* Get default VSI parameters from hardware */
4037                 memset(&ctxt, 0, sizeof(ctxt));
4038                 ctxt.seid = vsi->seid;
4039                 ctxt.pf_num = hw->pf_id;
4040                 ctxt.uplink_seid = vsi->uplink_seid;
4041                 ctxt.vf_num = 0;
4042                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4043                 if (ret != I40E_SUCCESS) {
4044                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
4045                         goto fail_msix_alloc;
4046                 }
4047                 (void)rte_memcpy(&vsi->info, &ctxt.info,
4048                         sizeof(struct i40e_aqc_vsi_properties_data));
4049                 vsi->vsi_id = ctxt.vsi_number;
4050                 vsi->info.valid_sections = 0;
4051
4052                 /* Configure tc, enabled TC0 only */
4053                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
4054                         I40E_SUCCESS) {
4055                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
4056                         goto fail_msix_alloc;
4057                 }
4058
4059                 /* TC, queue mapping */
4060                 memset(&ctxt, 0, sizeof(ctxt));
4061                 vsi->info.valid_sections |=
4062                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4063                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
4064                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
4065                 (void)rte_memcpy(&ctxt.info, &vsi->info,
4066                         sizeof(struct i40e_aqc_vsi_properties_data));
4067                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4068                                                 I40E_DEFAULT_TCMAP);
4069                 if (ret != I40E_SUCCESS) {
4070                         PMD_DRV_LOG(ERR, "Failed to configure "
4071                                     "TC queue mapping");
4072                         goto fail_msix_alloc;
4073                 }
4074                 ctxt.seid = vsi->seid;
4075                 ctxt.pf_num = hw->pf_id;
4076                 ctxt.uplink_seid = vsi->uplink_seid;
4077                 ctxt.vf_num = 0;
4078
4079                 /* Update VSI parameters */
4080                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4081                 if (ret != I40E_SUCCESS) {
4082                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
4083                         goto fail_msix_alloc;
4084                 }
4085
4086                 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
4087                                                 sizeof(vsi->info.tc_mapping));
4088                 (void)rte_memcpy(&vsi->info.queue_mapping,
4089                                 &ctxt.info.queue_mapping,
4090                         sizeof(vsi->info.queue_mapping));
4091                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
4092                 vsi->info.valid_sections = 0;
4093
4094                 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
4095                                 ETH_ADDR_LEN);
4096
                /**
                 * Updating the default filter settings is necessary to
                 * prevent reception of tagged packets.
                 * Some old firmware configurations load a default macvlan
                 * filter which accepts both tagged and untagged packets.
                 * The update replaces it with a normal filter if needed.
                 * For NVM 4.2.2 or later, the update is not needed anymore:
                 * such firmware loads the expected default macvlan filter,
                 * which cannot be removed.
                 */
4107                 i40e_update_default_filter_setting(vsi);
4108                 i40e_config_qinq(hw, vsi);
4109         } else if (type == I40E_VSI_SRIOV) {
4110                 memset(&ctxt, 0, sizeof(ctxt));
                /**
                 * For other VSIs, the uplink_seid equals the uplink VSI's
                 * uplink_seid since they share the same VEB.
                 */
4115                 vsi->uplink_seid = uplink_vsi->uplink_seid;
4116                 ctxt.pf_num = hw->pf_id;
4117                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
4118                 ctxt.uplink_seid = vsi->uplink_seid;
4119                 ctxt.connection_type = 0x1;
4120                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
4121
                /**
                 * Do not configure the switch ID to enable VEB switching via
                 * I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB: on Fortville, if the
                 * source MAC address of a packet sent from a VF is not
                 * listed in the VEB's MAC table, the VEB switches the
                 * packet back to the VF. Enable this once the HW issue
                 * is fixed.
                 */
4130
4131                 /* Configure port/vlan */
4132                 ctxt.info.valid_sections |=
4133                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4134                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4135                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4136                                                 I40E_DEFAULT_TCMAP);
4137                 if (ret != I40E_SUCCESS) {
4138                         PMD_DRV_LOG(ERR, "Failed to configure "
4139                                     "TC queue mapping");
4140                         goto fail_msix_alloc;
4141                 }
4142                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4143                 ctxt.info.valid_sections |=
4144                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
                /**
                 * The VSI is not created yet; only its parameters are
                 * configured here. The VSI itself is added below.
                 */
4149
4150                 i40e_config_qinq(hw, vsi);
4151         } else if (type == I40E_VSI_VMDQ2) {
4152                 memset(&ctxt, 0, sizeof(ctxt));
                /*
                 * For other VSIs, the uplink_seid equals the uplink VSI's
                 * uplink_seid since they share the same VEB.
                 */
4157                 vsi->uplink_seid = uplink_vsi->uplink_seid;
4158                 ctxt.pf_num = hw->pf_id;
4159                 ctxt.vf_num = 0;
4160                 ctxt.uplink_seid = vsi->uplink_seid;
4161                 ctxt.connection_type = 0x1;
4162                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
4163
4164                 ctxt.info.valid_sections |=
4165                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4166                 /* user_param carries flag to enable loop back */
4167                 if (user_param) {
4168                         ctxt.info.switch_id =
4169                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
4170                         ctxt.info.switch_id |=
4171                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4172                 }
4173
4174                 /* Configure port/vlan */
4175                 ctxt.info.valid_sections |=
4176                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4177                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4178                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4179                                                 I40E_DEFAULT_TCMAP);
4180                 if (ret != I40E_SUCCESS) {
4181                         PMD_DRV_LOG(ERR, "Failed to configure "
4182                                         "TC queue mapping");
4183                         goto fail_msix_alloc;
4184                 }
4185                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4186                 ctxt.info.valid_sections |=
4187                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4188         } else if (type == I40E_VSI_FDIR) {
4189                 memset(&ctxt, 0, sizeof(ctxt));
4190                 vsi->uplink_seid = uplink_vsi->uplink_seid;
4191                 ctxt.pf_num = hw->pf_id;
4192                 ctxt.vf_num = 0;
4193                 ctxt.uplink_seid = vsi->uplink_seid;
4194                 ctxt.connection_type = 0x1;     /* regular data port */
4195                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4196                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4197                                                 I40E_DEFAULT_TCMAP);
4198                 if (ret != I40E_SUCCESS) {
4199                         PMD_DRV_LOG(ERR, "Failed to configure "
4200                                         "TC queue mapping.");
4201                         goto fail_msix_alloc;
4202                 }
4203                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4204                 ctxt.info.valid_sections |=
4205                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4206         } else {
                PMD_DRV_LOG(ERR, "VSI: other VSI types are not supported yet");
4208                 goto fail_msix_alloc;
4209         }
4210
4211         if (vsi->type != I40E_VSI_MAIN) {
4212                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
4213                 if (ret != I40E_SUCCESS) {
4214                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
4215                                     hw->aq.asq_last_status);
4216                         goto fail_msix_alloc;
4217                 }
4218                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
4219                 vsi->info.valid_sections = 0;
4220                 vsi->seid = ctxt.seid;
4221                 vsi->vsi_id = ctxt.vsi_number;
4222                 vsi->sib_vsi_list.vsi = vsi;
4223                 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
4224                                 &vsi->sib_vsi_list, list);
4225         }
4226
4227         /* MAC/VLAN configuration */
4228         (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
4229         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4230
4231         ret = i40e_vsi_add_mac(vsi, &filter);
4232         if (ret != I40E_SUCCESS) {
4233                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4234                 goto fail_msix_alloc;
4235         }
4236
4237         /* Get VSI BW information */
4238         i40e_vsi_get_bw_config(vsi);
4239         return vsi;
fail_msix_alloc:
        i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
fail_queue_alloc:
        i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4244 fail_mem:
4245         rte_free(vsi);
4246         return NULL;
4247 }
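
/*
 * Usage sketch (hedged example; the exact call sites live elsewhere in
 * this driver): a caller creating an SRIOV VSI for VF 0 under the main
 * VSI would do something like:
 *
 *      struct i40e_vsi *vf_vsi =
 *              i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->main_vsi, 0);
 *      if (vf_vsi == NULL)
 *              PMD_DRV_LOG(ERR, "VF VSI setup failed");
 *
 * The uplink VSI must already exist; a VEB under it is created on
 * demand at the top of i40e_vsi_setup().
 */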
4248
4249 /* Configure vlan filter on or off */
4250 int
4251 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
4252 {
4253         int i, num;
4254         struct i40e_mac_filter *f;
4255         struct i40e_mac_filter_info *mac_filter;
4256         enum rte_mac_filter_type desired_filter;
4257         int ret = I40E_SUCCESS;
4258
4259         if (on) {
4260                 /* Filter to match MAC and VLAN */
4261                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
4262         } else {
4263                 /* Filter to match only MAC */
4264                 desired_filter = RTE_MAC_PERFECT_MATCH;
4265         }
4266
4267         num = vsi->mac_num;
4268
4269         mac_filter = rte_zmalloc("mac_filter_info_data",
4270                                  num * sizeof(*mac_filter), 0);
4271         if (mac_filter == NULL) {
4272                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4273                 return I40E_ERR_NO_MEMORY;
4274         }
4275
4276         i = 0;
4277
4278         /* Remove all existing mac */
4279         TAILQ_FOREACH(f, &vsi->mac_list, next) {
4280                 mac_filter[i] = f->mac_info;
4281                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
4282                 if (ret) {
4283                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
4284                                     on ? "enable" : "disable");
4285                         goto DONE;
4286                 }
4287                 i++;
4288         }
4289
4290         /* Override with new filter */
4291         for (i = 0; i < num; i++) {
4292                 mac_filter[i].filter_type = desired_filter;
4293                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
4294                 if (ret) {
4295                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
4296                                     on ? "enable" : "disable");
4297                         goto DONE;
4298                 }
4299         }
4300
4301 DONE:
4302         rte_free(mac_filter);
4303         return ret;
4304 }
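
/*
 * Note (editor's summary of the flow above): toggling VLAN filtering
 * rewrites every stored MAC filter in place. For example, with two MACs
 * in the list and on = TRUE, both entries are deleted and re-added as
 * RTE_MACVLAN_PERFECT_MATCH, so the hardware then matches on MAC+VLAN
 * pairs instead of on the MAC address alone.
 */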
4305
4306 /* Configure vlan stripping on or off */
4307 int
4308 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
4309 {
4310         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4311         struct i40e_vsi_context ctxt;
4312         uint8_t vlan_flags;
4313         int ret = I40E_SUCCESS;
4314
4315         /* Check if it has been already on or off */
4316         if (vsi->info.valid_sections &
4317                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
4318                 if (on) {
4319                         if ((vsi->info.port_vlan_flags &
4320                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
4321                                 return 0; /* already on */
4322                 } else {
4323                         if ((vsi->info.port_vlan_flags &
4324                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
4325                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
4326                                 return 0; /* already off */
4327                 }
4328         }
4329
4330         if (on)
4331                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
4332         else
4333                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
4334         vsi->info.valid_sections =
4335                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4336         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
4337         vsi->info.port_vlan_flags |= vlan_flags;
4338         ctxt.seid = vsi->seid;
4339         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4340         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4341         if (ret)
4342                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
4343                             on ? "enable" : "disable");
4344
4345         return ret;
4346 }
4347
4348 static int
4349 i40e_dev_init_vlan(struct rte_eth_dev *dev)
4350 {
4351         struct rte_eth_dev_data *data = dev->data;
4352         int ret;
4353         int mask = 0;
4354
4355         /* Apply vlan offload setting */
4356         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
4357         i40e_vlan_offload_set(dev, mask);
4358
4359         /* Apply double-vlan setting, not implemented yet */
4360
4361         /* Apply pvid setting */
4362         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
4363                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
4364         if (ret)
4365                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
4366
4367         return ret;
4368 }
4369
4370 static int
4371 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
4372 {
4373         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4374
4375         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
4376 }
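
/*
 * Note (based on the base-code prototype of i40e_aq_set_port_parameters):
 * the arguments above map to bad_frame_vsi = vsi->seid,
 * save_bad_pac = 0, pad_short_pac = 1 and double_vlan = on, i.e. only
 * the double VLAN (QinQ) port setting is toggled here.
 */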
4377
4378 static int
4379 i40e_update_flow_control(struct i40e_hw *hw)
4380 {
4381 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
4382         struct i40e_link_status link_status;
4383         uint32_t rxfc = 0, txfc = 0, reg;
4384         uint8_t an_info;
4385         int ret;
4386
4387         memset(&link_status, 0, sizeof(link_status));
4388         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
4389         if (ret != I40E_SUCCESS) {
4390                 PMD_DRV_LOG(ERR, "Failed to get link status information");
4391                 goto write_reg; /* Disable flow control */
4392         }
4393
4394         an_info = hw->phy.link_info.an_info;
4395         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
4396                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
4397                 ret = I40E_ERR_NOT_READY;
4398                 goto write_reg; /* Disable flow control */
4399         }
4400         /**
4401          * If link auto negotiation is enabled, flow control needs to
4402          * be configured according to it
4403          */
4404         switch (an_info & I40E_LINK_PAUSE_RXTX) {
4405         case I40E_LINK_PAUSE_RXTX:
4406                 rxfc = 1;
4407                 txfc = 1;
4408                 hw->fc.current_mode = I40E_FC_FULL;
4409                 break;
4410         case I40E_AQ_LINK_PAUSE_RX:
4411                 rxfc = 1;
4412                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
4413                 break;
4414         case I40E_AQ_LINK_PAUSE_TX:
4415                 txfc = 1;
4416                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
4417                 break;
4418         default:
4419                 hw->fc.current_mode = I40E_FC_NONE;
4420                 break;
4421         }
4422
4423 write_reg:
4424         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
4425                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
4426         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4427         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
4428         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
4429         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
4430
4431         return ret;
4432 }
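
/*
 * Note (editor's summary): the negotiated pause bits map directly to
 * the two registers written above; e.g. if an_info advertises both
 * I40E_AQ_LINK_PAUSE_RX and I40E_AQ_LINK_PAUSE_TX, rxfc = txfc = 1 and
 * the mode is I40E_FC_FULL, while any failure path jumps to write_reg
 * with rxfc = txfc = 0, disabling flow control.
 */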
4433
4434 /* PF setup */
4435 static int
4436 i40e_pf_setup(struct i40e_pf *pf)
4437 {
4438         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4439         struct i40e_filter_control_settings settings;
4440         struct i40e_vsi *vsi;
4441         int ret;
4442
4443         /* Clear all stats counters */
4444         pf->offset_loaded = FALSE;
4445         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
4446         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
4447
4448         ret = i40e_pf_get_switch_config(pf);
4449         if (ret != I40E_SUCCESS) {
4450                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
4451                 return ret;
4452         }
4453         if (pf->flags & I40E_FLAG_FDIR) {
                /* Allocate the queue first so that FDIR uses queue pair 0 */
4455                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
4456                 if (ret != I40E_FDIR_QUEUE_ID) {
                        PMD_DRV_LOG(ERR, "queue allocation failed for FDIR:"
                                    " ret=%d", ret);
4459                         pf->flags &= ~I40E_FLAG_FDIR;
4460                 }
4461         }
4462         /*  main VSI setup */
4463         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
4464         if (!vsi) {
4465                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
4466                 return I40E_ERR_NOT_READY;
4467         }
4468         pf->main_vsi = vsi;
4469
4470         /* Configure filter control */
4471         memset(&settings, 0, sizeof(settings));
4472         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
4473                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
4474         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
4475                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
4476         else {
                PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
                            hw->func_caps.rss_table_size);
4479                 return I40E_ERR_PARAM;
4480         }
        PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
                    "size: %u", hw->func_caps.rss_table_size);
4483         pf->hash_lut_size = hw->func_caps.rss_table_size;
4484
4485         /* Enable ethtype and macvlan filters */
4486         settings.enable_ethtype = TRUE;
4487         settings.enable_macvlan = TRUE;
4488         ret = i40e_set_filter_control(hw, &settings);
4489         if (ret)
4490                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
4491                                                                 ret);
4492
4493         /* Update flow control according to the auto negotiation */
4494         i40e_update_flow_control(hw);
4495
4496         return I40E_SUCCESS;
4497 }
4498
4499 int
4500 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
4501 {
4502         uint32_t reg;
4503         uint16_t j;
4504
        /**
         * Set or clear the TX Queue Disable flags,
         * as required by hardware.
         */
4509         i40e_pre_tx_queue_cfg(hw, q_idx, on);
4510         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
4511
4512         /* Wait until the request is finished */
4513         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4514                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4515                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
4516                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
4517                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
4518                                                         & 0x1))) {
4519                         break;
4520                 }
4521         }
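        /*
         * Note: the loop above waits until the QENA_REQ and QENA_STAT bits
         * agree (their XOR is 0), i.e. until any previous enable/disable
         * request has completed, before a new request is issued below.
         */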
4522         if (on) {
4523                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
4524                         return I40E_SUCCESS; /* already on, skip next steps */
4525
4526                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
4527                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4528         } else {
4529                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4530                         return I40E_SUCCESS; /* already off, skip next steps */
4531                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4532         }
4533         /* Write the register */
4534         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
4535         /* Check the result */
4536         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4537                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4538                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
4539                 if (on) {
4540                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
4541                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
4542                                 break;
4543                 } else {
4544                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
4545                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4546                                 break;
4547                 }
4548         }
        /* Check whether the operation timed out */
4550         if (j >= I40E_CHK_Q_ENA_COUNT) {
4551                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
4552                             (on ? "enable" : "disable"), q_idx);
4553                 return I40E_ERR_TIMEOUT;
4554         }
4555
4556         return I40E_SUCCESS;
4557 }
4558
/* Switch the tx queues on or off */
4560 static int
4561 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
4562 {
4563         struct rte_eth_dev_data *dev_data = pf->dev_data;
4564         struct i40e_tx_queue *txq;
4565         struct rte_eth_dev *dev = pf->adapter->eth_dev;
4566         uint16_t i;
4567         int ret;
4568
4569         for (i = 0; i < dev_data->nb_tx_queues; i++) {
4570                 txq = dev_data->tx_queues[i];
                /* Skip the queue if it is not configured, or if it is a
                 * deferred-start queue while starting all queues */
4573                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
4574                         continue;
4575                 if (on)
4576                         ret = i40e_dev_tx_queue_start(dev, i);
4577                 else
4578                         ret = i40e_dev_tx_queue_stop(dev, i);
                if (ret != I40E_SUCCESS)
4580                         return ret;
4581         }
4582
4583         return I40E_SUCCESS;
4584 }
4585
4586 int
4587 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
4588 {
4589         uint32_t reg;
4590         uint16_t j;
4591
4592         /* Wait until the request is finished */
4593         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4594                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4595                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
4596                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
4597                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
4598                         break;
4599         }
4600
4601         if (on) {
4602                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
4603                         return I40E_SUCCESS; /* Already on, skip next steps */
4604                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4605         } else {
4606                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
4607                         return I40E_SUCCESS; /* Already off, skip next steps */
4608                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4609         }
4610
4611         /* Write the register */
4612         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
4613         /* Check the result */
4614         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4615                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4616                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
4617                 if (on) {
4618                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
4619                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
4620                                 break;
4621                 } else {
4622                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
4623                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
4624                                 break;
4625                 }
4626         }
4627
        /* Check whether the operation timed out */
4629         if (j >= I40E_CHK_Q_ENA_COUNT) {
4630                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
4631                             (on ? "enable" : "disable"), q_idx);
4632                 return I40E_ERR_TIMEOUT;
4633         }
4634
4635         return I40E_SUCCESS;
4636 }
4637 /* Switch on or off the rx queues */
4638 static int
4639 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
4640 {
4641         struct rte_eth_dev_data *dev_data = pf->dev_data;
4642         struct i40e_rx_queue *rxq;
4643         struct rte_eth_dev *dev = pf->adapter->eth_dev;
4644         uint16_t i;
4645         int ret;
4646
4647         for (i = 0; i < dev_data->nb_rx_queues; i++) {
4648                 rxq = dev_data->rx_queues[i];
                /* Skip the queue if it is not configured, or if it is a
                 * deferred-start queue while starting all queues */
4651                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
4652                         continue;
4653                 if (on)
4654                         ret = i40e_dev_rx_queue_start(dev, i);
4655                 else
4656                         ret = i40e_dev_rx_queue_stop(dev, i);
4657                 if (ret != I40E_SUCCESS)
4658                         return ret;
4659         }
4660
4661         return I40E_SUCCESS;
4662 }
4663
4664 /* Switch on or off all the rx/tx queues */
4665 int
4666 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
4667 {
4668         int ret;
4669
4670         if (on) {
4671                 /* enable rx queues before enabling tx queues */
4672                 ret = i40e_dev_switch_rx_queues(pf, on);
4673                 if (ret) {
4674                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
4675                         return ret;
4676                 }
4677                 ret = i40e_dev_switch_tx_queues(pf, on);
4678         } else {
4679                 /* Stop tx queues before stopping rx queues */
4680                 ret = i40e_dev_switch_tx_queues(pf, on);
4681                 if (ret) {
4682                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
4683                         return ret;
4684                 }
4685                 ret = i40e_dev_switch_rx_queues(pf, on);
4686         }
4687
4688         return ret;
4689 }
4690
4691 /* Initialize VSI for TX */
4692 static int
4693 i40e_dev_tx_init(struct i40e_pf *pf)
4694 {
4695         struct rte_eth_dev_data *data = pf->dev_data;
4696         uint16_t i;
        int ret = I40E_SUCCESS;
4698         struct i40e_tx_queue *txq;
4699
4700         for (i = 0; i < data->nb_tx_queues; i++) {
4701                 txq = data->tx_queues[i];
4702                 if (!txq || !txq->q_set)
4703                         continue;
4704                 ret = i40e_tx_queue_init(txq);
4705                 if (ret != I40E_SUCCESS)
4706                         break;
4707         }
4708         if (ret == I40E_SUCCESS)
4709                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
4710                                      ->eth_dev);
4711
4712         return ret;
4713 }
4714
4715 /* Initialize VSI for RX */
4716 static int
4717 i40e_dev_rx_init(struct i40e_pf *pf)
4718 {
4719         struct rte_eth_dev_data *data = pf->dev_data;
4720         int ret = I40E_SUCCESS;
4721         uint16_t i;
4722         struct i40e_rx_queue *rxq;
4723
4724         i40e_pf_config_mq_rx(pf);
4725         for (i = 0; i < data->nb_rx_queues; i++) {
4726                 rxq = data->rx_queues[i];
4727                 if (!rxq || !rxq->q_set)
4728                         continue;
4729
4730                 ret = i40e_rx_queue_init(rxq);
4731                 if (ret != I40E_SUCCESS) {
4732                         PMD_DRV_LOG(ERR, "Failed to do RX queue "
4733                                     "initialization");
4734                         break;
4735                 }
4736         }
4737         if (ret == I40E_SUCCESS)
4738                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
4739                                      ->eth_dev);
4740
4741         return ret;
4742 }
4743
4744 static int
4745 i40e_dev_rxtx_init(struct i40e_pf *pf)
4746 {
4747         int err;
4748
4749         err = i40e_dev_tx_init(pf);
4750         if (err) {
4751                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
4752                 return err;
4753         }
4754         err = i40e_dev_rx_init(pf);
4755         if (err) {
4756                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
4757                 return err;
4758         }
4759
4760         return err;
4761 }
4762
4763 static int
4764 i40e_vmdq_setup(struct rte_eth_dev *dev)
4765 {
4766         struct rte_eth_conf *conf = &dev->data->dev_conf;
4767         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4768         int i, err, conf_vsis, j, loop;
4769         struct i40e_vsi *vsi;
4770         struct i40e_vmdq_info *vmdq_info;
4771         struct rte_eth_vmdq_rx_conf *vmdq_conf;
4772         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4773
        /*
         * Disable interrupts to avoid messages from VFs. Furthermore, this
         * avoids race conditions during VSI creation/destruction.
         */
4778         i40e_pf_disable_irq0(hw);
4779
4780         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
4781                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
4782                 return -ENOTSUP;
4783         }
4784
4785         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
4786         if (conf_vsis > pf->max_nb_vmdq_vsi) {
4787                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
4788                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
4789                         pf->max_nb_vmdq_vsi);
4790                 return -ENOTSUP;
4791         }
4792
4793         if (pf->vmdq != NULL) {
4794                 PMD_INIT_LOG(INFO, "VMDQ already configured");
4795                 return 0;
4796         }
4797
4798         pf->vmdq = rte_zmalloc("vmdq_info_struct",
4799                                 sizeof(*vmdq_info) * conf_vsis, 0);
4800
4801         if (pf->vmdq == NULL) {
4802                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
4803                 return -ENOMEM;
4804         }
4805
4806         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
4807
4808         /* Create VMDQ VSI */
4809         for (i = 0; i < conf_vsis; i++) {
4810                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
4811                                 vmdq_conf->enable_loop_back);
4812                 if (vsi == NULL) {
4813                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
4814                         err = -1;
4815                         goto err_vsi_setup;
4816                 }
4817                 vmdq_info = &pf->vmdq[i];
4818                 vmdq_info->pf = pf;
4819                 vmdq_info->vsi = vsi;
4820         }
4821         pf->nb_cfg_vmdq_vsi = conf_vsis;
4822
        /* Configure VLANs */
4824         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
4825         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
4826                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
4827                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
4828                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
4829                                         vmdq_conf->pool_map[i].vlan_id, j);
4830
4831                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
4832                                                 vmdq_conf->pool_map[i].vlan_id);
4833                                 if (err) {
4834                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
4835                                         err = -1;
4836                                         goto err_vsi_setup;
4837                                 }
4838                         }
4839                 }
4840         }
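
        /*
         * Illustrative example (hypothetical values): with
         * pool_map[i].pools = 0x5 and vlan_id = 100, VLAN 100 is added to
         * VMDQ VSIs 0 and 2 (bits 0 and 2 of the pool bitmap), provided
         * both VSIs were created above.
         */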
4841
4842         i40e_pf_enable_irq0(hw);
4843
4844         return 0;
4845
err_vsi_setup:
        for (i = 0; i < conf_vsis; i++) {
                if (pf->vmdq[i].vsi == NULL)
                        break;
                i40e_vsi_release(pf->vmdq[i].vsi);
        }
4852
4853         rte_free(pf->vmdq);
4854         pf->vmdq = NULL;
4855         i40e_pf_enable_irq0(hw);
4856         return err;
4857 }
4858
4859 static void
4860 i40e_stat_update_32(struct i40e_hw *hw,
4861                    uint32_t reg,
4862                    bool offset_loaded,
4863                    uint64_t *offset,
4864                    uint64_t *stat)
4865 {
4866         uint64_t new_data;
4867
4868         new_data = (uint64_t)I40E_READ_REG(hw, reg);
4869         if (!offset_loaded)
4870                 *offset = new_data;
4871
4872         if (new_data >= *offset)
4873                 *stat = (uint64_t)(new_data - *offset);
4874         else
4875                 *stat = (uint64_t)((new_data +
4876                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
4877 }
4878
4879 static void
4880 i40e_stat_update_48(struct i40e_hw *hw,
4881                    uint32_t hireg,
4882                    uint32_t loreg,
4883                    bool offset_loaded,
4884                    uint64_t *offset,
4885                    uint64_t *stat)
4886 {
4887         uint64_t new_data;
4888
4889         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
4890         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
4891                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
4892
4893         if (!offset_loaded)
4894                 *offset = new_data;
4895
4896         if (new_data >= *offset)
4897                 *stat = new_data - *offset;
4898         else
4899                 *stat = (uint64_t)((new_data +
4900                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
4901
4902         *stat &= I40E_48_BIT_MASK;
4903 }
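
/*
 * Illustrative example (editor's arithmetic, not driver logic): with
 * offset_loaded = true and *offset = 0xFFFFFFFFFFF0, a new 48-bit
 * reading of 0x10 yields (0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0 = 0x20,
 * so the reported statistic keeps increasing monotonically across a
 * hardware counter rollover.
 */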
4904
4905 /* Disable IRQ0 */
4906 void
4907 i40e_pf_disable_irq0(struct i40e_hw *hw)
4908 {
4909         /* Disable all interrupt types */
4910         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
4911         I40E_WRITE_FLUSH(hw);
4912 }
4913
4914 /* Enable IRQ0 */
4915 void
4916 i40e_pf_enable_irq0(struct i40e_hw *hw)
4917 {
4918         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
4919                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
4920                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4921                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
4922         I40E_WRITE_FLUSH(hw);
4923 }
4924
4925 static void
4926 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
4927 {
4928         /* read pending request and disable first */
4929         i40e_pf_disable_irq0(hw);
4930         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
4931         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
4932                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
4933
4934         if (no_queue)
4935                 /* Link no queues with irq0 */
4936                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
4937                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
4938 }
4939
4940 static void
4941 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
4942 {
4943         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4944         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4945         int i;
4946         uint16_t abs_vf_id;
4947         uint32_t index, offset, val;
4948
4949         if (!pf->vfs)
4950                 return;
        /**
         * Try to find which VF triggered a reset. Use the absolute VF id
         * to access it, since the register is a global one.
         */
4955         for (i = 0; i < pf->vf_num; i++) {
4956                 abs_vf_id = hw->func_caps.vf_base_id + i;
4957                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
4958                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
4959                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
                /* VFR event occurred */
4961                 if (val & (0x1 << offset)) {
4962                         int ret;
4963
4964                         /* Clear the event first */
4965                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
4966                                                         (0x1 << offset));
                        PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
                        /**
                         * Only notify that a VF reset event occurred;
                         * don't trigger another SW reset.
                         */
4972                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
4973                         if (ret != I40E_SUCCESS)
4974                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
4975                 }
4976         }
4977 }
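
/*
 * Illustrative example (hypothetical vf_base_id): with
 * hw->func_caps.vf_base_id = 64 and i = 5, abs_vf_id = 69, so
 * index = 69 / 32 = 2 and offset = 69 % 32 = 5; bit 5 of
 * I40E_GLGEN_VFLRSTAT(2) then reports the VFR event for that VF.
 */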
4978
4979 static void
4980 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
4981 {
4982         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4983         struct i40e_arq_event_info info;
4984         uint16_t pending, opcode;
4985         int ret;
4986
4987         info.buf_len = I40E_AQ_BUF_SZ;
4988         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
4989         if (!info.msg_buf) {
4990                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
4991                 return;
4992         }
4993
4994         pending = 1;
4995         while (pending) {
4996                 ret = i40e_clean_arq_element(hw, &info, &pending);
4997
4998                 if (ret != I40E_SUCCESS) {
4999                         PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
5000                                     "aq_err: %u", hw->aq.asq_last_status);
5001                         break;
5002                 }
5003                 opcode = rte_le_to_cpu_16(info.desc.opcode);
5004
5005                 switch (opcode) {
5006                 case i40e_aqc_opc_send_msg_to_pf:
                        /* Refer to i40e_aq_send_msg_to_pf() for the argument layout */
5008                         i40e_pf_host_handle_vf_msg(dev,
5009                                         rte_le_to_cpu_16(info.desc.retval),
5010                                         rte_le_to_cpu_32(info.desc.cookie_high),
5011                                         rte_le_to_cpu_32(info.desc.cookie_low),
5012                                         info.msg_buf,
5013                                         info.msg_len);
5014                         break;
5015                 default:
5016                         PMD_DRV_LOG(ERR, "Request %u is not supported yet",
5017                                     opcode);
5018                         break;
5019                 }
5020         }
5021         rte_free(info.msg_buf);
5022 }
5023
/*
 * This interrupt handler is registered as the alarm callback for handling
 * the LSC interrupt after a fixed delay, in order to wait for the NIC to
 * reach a stable state. Currently it waits 1 second for the link up
 * interrupt; no wait is needed for the link down interrupt.
 */
5030 static void
5031 i40e_dev_interrupt_delayed_handler(void *param)
5032 {
5033         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
5034         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5035         uint32_t icr0;
5036
5037         /* read interrupt causes again */
5038         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
5039
5040 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
        if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
                PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
        if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
                PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
        if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
                PMD_DRV_LOG(INFO, "ICR0: global reset requested");
        if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
                PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
        if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
                PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
                            "state");
        if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
                PMD_DRV_LOG(ERR, "ICR0: HMC error");
        if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
                PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
5056 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
5057
5058         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
                PMD_DRV_LOG(INFO, "INT:VF reset detected");
5060                 i40e_dev_handle_vfr_event(dev);
5061         }
5062         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
                PMD_DRV_LOG(INFO, "INT:ADMINQ event");
5064                 i40e_dev_handle_aq_msg(dev);
5065         }
5066
5067         /* handle the link up interrupt in an alarm callback */
5068         i40e_dev_link_update(dev, 0);
5069         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
5070
5071         i40e_pf_enable_irq0(hw);
5072         rte_intr_enable(&(dev->pci_dev->intr_handle));
5073 }
5074
/**
 * Interrupt handler triggered by the NIC for handling
 * specific interrupts.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
5087 static void
5088 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
5089                            void *param)
5090 {
5091         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
5092         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5093         uint32_t icr0;
5094
5095         /* Disable interrupt */
5096         i40e_pf_disable_irq0(hw);
5097
5098         /* read out interrupt causes */
5099         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
5100
5101         /* No interrupt event indicated */
5102         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
5103                 PMD_DRV_LOG(INFO, "No interrupt event");
5104                 goto done;
5105         }
5106 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
5107         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
5108                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
5109         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
5110                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
5111         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
5112                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
5113         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
5114                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
5115         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
5116                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
5117         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
5118                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
5119         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
5120                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
5121 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
5122
5123         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
5124                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
5125                 i40e_dev_handle_vfr_event(dev);
5126         }
5127         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
5128                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
5129                 i40e_dev_handle_aq_msg(dev);
5130         }
5131
5132         /* Link Status Change interrupt */
5133         if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
5134 #define I40E_US_PER_SECOND 1000000
5135                 struct rte_eth_link link;
5136
                PMD_DRV_LOG(INFO, "ICR0: link status changed");
5138                 memset(&link, 0, sizeof(link));
5139                 rte_i40e_dev_atomic_read_link_status(dev, &link);
5140                 i40e_dev_link_update(dev, 0);
5141
5142                 /*
5143                  * For link up interrupt, it needs to wait 1 second to let the
5144                  * hardware be a stable state. Otherwise several consecutive
5145                  * interrupts can be observed.
5146                  * For link down interrupt, no need to wait.
5147                  */
5148                 if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
5149                         i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
5150                         return;
5151                 else
5152                         _rte_eth_dev_callback_process(dev,
5153                                 RTE_ETH_EVENT_INTR_LSC);
5154         }
5155
5156 done:
5157         /* Enable interrupt */
5158         i40e_pf_enable_irq0(hw);
5159         rte_intr_enable(&(dev->pci_dev->intr_handle));
5160 }
5161
5162 static int
5163 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
5164                          struct i40e_macvlan_filter *filter,
5165                          int total)
5166 {
5167         int ele_num, ele_buff_size;
5168         int num, actual_num, i;
5169         uint16_t flags;
5170         int ret = I40E_SUCCESS;
5171         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5172         struct i40e_aqc_add_macvlan_element_data *req_list;
5173
        if (filter == NULL || total == 0)
5175                 return I40E_ERR_PARAM;
5176         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5177         ele_buff_size = hw->aq.asq_buf_size;
5178
5179         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
5180         if (req_list == NULL) {
5181                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
5182                 return I40E_ERR_NO_MEMORY;
5183         }
5184
5185         num = 0;
5186         do {
5187                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5188                 memset(req_list, 0, ele_buff_size);
5189
5190                 for (i = 0; i < actual_num; i++) {
5191                         (void)rte_memcpy(req_list[i].mac_addr,
5192                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
5193                         req_list[i].vlan_tag =
5194                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
5195
5196                         switch (filter[num + i].filter_type) {
5197                         case RTE_MAC_PERFECT_MATCH:
5198                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
5199                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5200                                 break;
5201                         case RTE_MACVLAN_PERFECT_MATCH:
5202                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
5203                                 break;
5204                         case RTE_MAC_HASH_MATCH:
5205                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
5206                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5207                                 break;
5208                         case RTE_MACVLAN_HASH_MATCH:
5209                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
5210                                 break;
5211                         default:
5212                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
5213                                 ret = I40E_ERR_PARAM;
5214                                 goto DONE;
5215                         }
5216
5217                         req_list[i].queue_number = 0;
5218
5219                         req_list[i].flags = rte_cpu_to_le_16(flags);
5220                 }
5221
5222                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
5223                                                 actual_num, NULL);
5224                 if (ret != I40E_SUCCESS) {
5225                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
5226                         goto DONE;
5227                 }
5228                 num += actual_num;
5229         } while (num < total);
5230
5231 DONE:
5232         rte_free(req_list);
5233         return ret;
5234 }
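
/*
 * Caller sketch for the helper above ('vsi' and 'some_mac' are assumed
 * to be in scope; real callers such as i40e_vsi_add_vlan() build the
 * array from the VSI state). The helper splits the request into
 * admin-queue-sized chunks of ele_num elements, so 'total' may exceed
 * what fits in a single AQ buffer:
 *
 *     struct i40e_macvlan_filter mv_f;
 *     int ret;
 *
 *     memset(&mv_f, 0, sizeof(mv_f));
 *     (void)rte_memcpy(&mv_f.macaddr, &some_mac, ETH_ADDR_LEN);
 *     mv_f.vlan_id = 100;
 *     mv_f.filter_type = RTE_MACVLAN_PERFECT_MATCH;
 *     ret = i40e_add_macvlan_filters(vsi, &mv_f, 1);
 */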
5235
5236 static int
5237 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
5238                             struct i40e_macvlan_filter *filter,
5239                             int total)
5240 {
5241         int ele_num, ele_buff_size;
5242         int num, actual_num, i;
5243         uint16_t flags;
5244         int ret = I40E_SUCCESS;
5245         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5246         struct i40e_aqc_remove_macvlan_element_data *req_list;
5247
5248         if (filter == NULL || total == 0)
5249                 return I40E_ERR_PARAM;
5250
5251         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5252         ele_buff_size = hw->aq.asq_buf_size;
5253
5254         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
5255         if (req_list == NULL) {
5256                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
5257                 return I40E_ERR_NO_MEMORY;
5258         }
5259
5260         num = 0;
5261         do {
5262                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5263                 memset(req_list, 0, ele_buff_size);
5264
5265                 for (i = 0; i < actual_num; i++) {
5266                         (void)rte_memcpy(req_list[i].mac_addr,
5267                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
5268                         req_list[i].vlan_tag =
5269                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
5270
5271                         switch (filter[num + i].filter_type) {
5272                         case RTE_MAC_PERFECT_MATCH:
5273                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5274                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5275                                 break;
5276                         case RTE_MACVLAN_PERFECT_MATCH:
5277                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
5278                                 break;
5279                         case RTE_MAC_HASH_MATCH:
5280                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
5281                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5282                                 break;
5283                         case RTE_MACVLAN_HASH_MATCH:
5284                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
5285                                 break;
5286                         default:
5287                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
5288                                 ret = I40E_ERR_PARAM;
5289                                 goto DONE;
5290                         }
5291                         req_list[i].flags = rte_cpu_to_le_16(flags);
5292                 }
5293
5294                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
5295                                                 actual_num, NULL);
5296                 if (ret != I40E_SUCCESS) {
5297                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
5298                         goto DONE;
5299                 }
5300                 num += actual_num;
5301         } while (num < total);
5302
5303 DONE:
5304         rte_free(req_list);
5305         return ret;
5306 }
5307
5308 /* Find the specified MAC filter */
5309 static struct i40e_mac_filter *
5310 i40e_find_mac_filter(struct i40e_vsi *vsi,
5311                          struct ether_addr *macaddr)
5312 {
5313         struct i40e_mac_filter *f;
5314
5315         TAILQ_FOREACH(f, &vsi->mac_list, next) {
5316                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
5317                         return f;
5318         }
5319
5320         return NULL;
5321 }
5322
5323 static bool
5324 i40e_find_vlan_filter(struct i40e_vsi *vsi,
5325                          uint16_t vlan_id)
5326 {
5327         uint32_t vid_idx, vid_bit;
5328
5329         if (vlan_id > ETH_VLAN_ID_MAX)
5330                 return 0;
5331
5332         vid_idx = I40E_VFTA_IDX(vlan_id);
5333         vid_bit = I40E_VFTA_BIT(vlan_id);
5334
5335         if (vsi->vfta[vid_idx] & vid_bit)
5336                 return 1;
5337         else
5338                 return 0;
5339 }
5340
5341 static void
5342 i40e_set_vlan_filter(struct i40e_vsi *vsi,
5343                          uint16_t vlan_id, bool on)
5344 {
5345         uint32_t vid_idx, vid_bit;
5346
5347         if (vlan_id > ETH_VLAN_ID_MAX)
5348                 return;
5349
5350         vid_idx = I40E_VFTA_IDX(vlan_id);
5351         vid_bit = I40E_VFTA_BIT(vlan_id);
5352
5353         if (on)
5354                 vsi->vfta[vid_idx] |= vid_bit;
5355         else
5356                 vsi->vfta[vid_idx] &= ~vid_bit;
5357 }
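
/*
 * Worked example of the VFTA bitmap math above, assuming the usual
 * divide/modulo-by-32 definitions of I40E_VFTA_IDX/I40E_VFTA_BIT:
 *
 *     vlan_id = 100
 *     vid_idx = I40E_VFTA_IDX(100) = 100 / 32 = 3
 *     vid_bit = I40E_VFTA_BIT(100) = 1 << (100 % 32) = 1 << 4
 *
 * so VLAN 100 is tracked as bit 4 of vsi->vfta[3], and the reverse
 * mapping j * I40E_UINT32_BIT_SIZE + k in i40e_find_all_vlan_for_mac()
 * below recovers 3 * 32 + 4 = 100.
 */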
5358
5359 /**
5360  * Find all VLAN options for the given MAC address and fill them into
5361  * mv_f; fails if num is smaller than the number of VLANs set.
5362  */
5363 static inline int
5364 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
5365                            struct i40e_macvlan_filter *mv_f,
5366                            int num, struct ether_addr *addr)
5367 {
5368         int i;
5369         uint32_t j, k;
5370
5371         /**
5372          * Walk the VFTA bitmap directly rather than calling
5373          * i40e_find_vlan_filter() per VLAN ID, to keep the loop time down.
5374          */
5375         if (num < vsi->vlan_num)
5376                 return I40E_ERR_PARAM;
5377
5378         i = 0;
5379         for (j = 0; j < I40E_VFTA_SIZE; j++) {
5380                 if (vsi->vfta[j]) {
5381                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
5382                                 if (vsi->vfta[j] & (1 << k)) {
5383                                         if (i > num - 1) {
5384                                                 PMD_DRV_LOG(ERR, "vlan number "
5385                                                             "does not match");
5386                                                 return I40E_ERR_PARAM;
5387                                         }
5388                                         (void)rte_memcpy(&mv_f[i].macaddr,
5389                                                         addr, ETH_ADDR_LEN);
5390                                         mv_f[i].vlan_id =
5391                                                 j * I40E_UINT32_BIT_SIZE + k;
5392                                         i++;
5393                                 }
5394                         }
5395                 }
5396         }
5397         return I40E_SUCCESS;
5398 }
5399
5400 static inline int
5401 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
5402                            struct i40e_macvlan_filter *mv_f,
5403                            int num,
5404                            uint16_t vlan)
5405 {
5406         int i = 0;
5407         struct i40e_mac_filter *f;
5408
5409         if (num < vsi->mac_num)
5410                 return I40E_ERR_PARAM;
5411
5412         TAILQ_FOREACH(f, &vsi->mac_list, next) {
5413                 if (i > num - 1) {
5414                         PMD_DRV_LOG(ERR, "buffer number does not match");
5415                         return I40E_ERR_PARAM;
5416                 }
5417                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
5418                                 ETH_ADDR_LEN);
5419                 mv_f[i].vlan_id = vlan;
5420                 mv_f[i].filter_type = f->mac_info.filter_type;
5421                 i++;
5422         }
5423
5424         return I40E_SUCCESS;
5425 }
5426
5427 static int
5428 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
5429 {
5430         int i, num;
5431         struct i40e_mac_filter *f;
5432         struct i40e_macvlan_filter *mv_f;
5433         int ret = I40E_SUCCESS;
5434
5435         if (vsi == NULL || vsi->mac_num == 0)
5436                 return I40E_ERR_PARAM;
5437
5438         /* Case where no VLAN is set */
5439         if (vsi->vlan_num == 0)
5440                 num = vsi->mac_num;
5441         else
5442                 num = vsi->mac_num * vsi->vlan_num;
5443
5444         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
5445         if (mv_f == NULL) {
5446                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5447                 return I40E_ERR_NO_MEMORY;
5448         }
5449
5450         i = 0;
5451         if (vsi->vlan_num == 0) {
5452                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
5453                         (void)rte_memcpy(&mv_f[i].macaddr,
5454                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
5455                         mv_f[i].vlan_id = 0;
5456                         i++;
5457                 }
5458         } else {
5459                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
5460                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
5461                                         vsi->vlan_num, &f->mac_info.mac_addr);
5462                         if (ret != I40E_SUCCESS)
5463                                 goto DONE;
5464                         i += vsi->vlan_num;
5465                 }
5466         }
5467
5468         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
5469 DONE:
5470         rte_free(mv_f);
5471
5472         return ret;
5473 }
5474
5475 int
5476 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
5477 {
5478         struct i40e_macvlan_filter *mv_f;
5479         int mac_num;
5480         int ret = I40E_SUCCESS;
5481
5482         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
5483                 return I40E_ERR_PARAM;
5484
5485         /* If it's already set, just return */
5486         if (i40e_find_vlan_filter(vsi, vlan))
5487                 return I40E_SUCCESS;
5488
5489         mac_num = vsi->mac_num;
5490
5491         if (mac_num == 0) {
5492                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
5493                 return I40E_ERR_PARAM;
5494         }
5495
5496         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
5497
5498         if (mv_f == NULL) {
5499                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5500                 return I40E_ERR_NO_MEMORY;
5501         }
5502
5503         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
5504
5505         if (ret != I40E_SUCCESS)
5506                 goto DONE;
5507
5508         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
5509
5510         if (ret != I40E_SUCCESS)
5511                 goto DONE;
5512
5513         i40e_set_vlan_filter(vsi, vlan, 1);
5514
5515         vsi->vlan_num++;
5516         ret = I40E_SUCCESS;
5517 DONE:
5518         rte_free(mv_f);
5519         return ret;
5520 }
5521
5522 int
5523 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
5524 {
5525         struct i40e_macvlan_filter *mv_f;
5526         int mac_num;
5527         int ret = I40E_SUCCESS;
5528
5529         /**
5530          * Vlan 0 is the generic filter for untagged packets
5531          * and can't be removed.
5532          */
5533         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
5534                 return I40E_ERR_PARAM;
5535
5536         /* If the VLAN filter can't be found, return an error */
5537         if (!i40e_find_vlan_filter(vsi, vlan))
5538                 return I40E_ERR_PARAM;
5539
5540         mac_num = vsi->mac_num;
5541
5542         if (mac_num == 0) {
5543                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
5544                 return I40E_ERR_PARAM;
5545         }
5546
5547         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
5548
5549         if (mv_f == NULL) {
5550                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5551                 return I40E_ERR_NO_MEMORY;
5552         }
5553
5554         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
5555
5556         if (ret != I40E_SUCCESS)
5557                 goto DONE;
5558
5559         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
5560
5561         if (ret != I40E_SUCCESS)
5562                 goto DONE;
5563
5564         /* This is the last VLAN to remove; replace all MAC filters with VLAN 0 */
5565         if (vsi->vlan_num == 1) {
5566                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
5567                 if (ret != I40E_SUCCESS)
5568                         goto DONE;
5569
5570                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
5571                 if (ret != I40E_SUCCESS)
5572                         goto DONE;
5573         }
5574
5575         i40e_set_vlan_filter(vsi, vlan, 0);
5576
5577         vsi->vlan_num--;
5578         ret = I40E_SUCCESS;
5579 DONE:
5580         rte_free(mv_f);
5581         return ret;
5582 }
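
/*
 * Caller sketch for the VLAN add/delete pair above ('vsi' is assumed to
 * be a configured VSI holding at least one MAC; the PMD's
 * vlan_filter_set callback drives these from rte_eth_dev_vlan_filter()):
 *
 *     ret = i40e_vsi_add_vlan(vsi, 100);
 *     ret = i40e_vsi_delete_vlan(vsi, 100);
 *
 * Adding installs one MAC/VLAN filter per configured MAC; deleting the
 * last VLAN re-installs the VLAN-0 filters so untagged traffic still
 * matches.
 */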
5583
5584 int
5585 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
5586 {
5587         struct i40e_mac_filter *f;
5588         struct i40e_macvlan_filter *mv_f;
5589         int i, vlan_num = 0;
5590         int ret = I40E_SUCCESS;
5591
5592         /* If the MAC filter is already configured, just return */
5593         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
5594         if (f != NULL)
5595                 return I40E_SUCCESS;
5596         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
5597                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
5598
5599                 /**
5600                  * If vlan_num is 0, this is the first time a MAC is added;
5601                  * set the mask for vlan_id 0.
5602                  */
5603                 if (vsi->vlan_num == 0) {
5604                         i40e_set_vlan_filter(vsi, 0, 1);
5605                         vsi->vlan_num = 1;
5606                 }
5607                 vlan_num = vsi->vlan_num;
5608         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
5609                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
5610                 vlan_num = 1;
5611
5612         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
5613         if (mv_f == NULL) {
5614                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5615                 return I40E_ERR_NO_MEMORY;
5616         }
5617
5618         for (i = 0; i < vlan_num; i++) {
5619                 mv_f[i].filter_type = mac_filter->filter_type;
5620                 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
5621                                 ETH_ADDR_LEN);
5622         }
5623
5624         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
5625                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
5626                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
5627                                         &mac_filter->mac_addr);
5628                 if (ret != I40E_SUCCESS)
5629                         goto DONE;
5630         }
5631
5632         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
5633         if (ret != I40E_SUCCESS)
5634                 goto DONE;
5635
5636         /* Add the mac addr into mac list */
5637         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5638         if (f == NULL) {
5639                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5640                 ret = I40E_ERR_NO_MEMORY;
5641                 goto DONE;
5642         }
5643         (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
5644                         ETH_ADDR_LEN);
5645         f->mac_info.filter_type = mac_filter->filter_type;
5646         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5647         vsi->mac_num++;
5648
5649         ret = I40E_SUCCESS;
5650 DONE:
5651         rte_free(mv_f);
5652
5653         return ret;
5654 }
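
/*
 * Caller sketch for i40e_vsi_add_mac() ('vsi' assumed in scope, and a
 * hypothetical locally administered address; the PMD's mac_addr_add
 * callback builds the same structure from the ethdev arguments):
 *
 *     struct i40e_mac_filter_info mac_filter;
 *     struct ether_addr mac = {
 *             .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01} };
 *     int ret;
 *
 *     mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
 *     (void)rte_memcpy(&mac_filter.mac_addr, &mac, ETH_ADDR_LEN);
 *     ret = i40e_vsi_add_mac(vsi, &mac_filter);
 */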
5655
5656 int
5657 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
5658 {
5659         struct i40e_mac_filter *f;
5660         struct i40e_macvlan_filter *mv_f;
5661         int i, vlan_num;
5662         enum rte_mac_filter_type filter_type;
5663         int ret = I40E_SUCCESS;
5664
5665         /* Can't find it, return an error */
5666         f = i40e_find_mac_filter(vsi, addr);
5667         if (f == NULL)
5668                 return I40E_ERR_PARAM;
5669
5670         vlan_num = vsi->vlan_num;
5671         filter_type = f->mac_info.filter_type;
5672         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
5673                 filter_type == RTE_MACVLAN_HASH_MATCH) {
5674                 if (vlan_num == 0) {
5675                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
5676                         return I40E_ERR_PARAM;
5677                 }
5678         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
5679                         filter_type == RTE_MAC_HASH_MATCH)
5680                 vlan_num = 1;
5681
5682         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
5683         if (mv_f == NULL) {
5684                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5685                 return I40E_ERR_NO_MEMORY;
5686         }
5687
5688         for (i = 0; i < vlan_num; i++) {
5689                 mv_f[i].filter_type = filter_type;
5690                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
5691                                 ETH_ADDR_LEN);
5692         }
5693         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
5694                         filter_type == RTE_MACVLAN_HASH_MATCH) {
5695                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
5696                 if (ret != I40E_SUCCESS)
5697                         goto DONE;
5698         }
5699
5700         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
5701         if (ret != I40E_SUCCESS)
5702                 goto DONE;
5703
5704         /* Remove the MAC addr from the MAC list */
5705         TAILQ_REMOVE(&vsi->mac_list, f, next);
5706         rte_free(f);
5707         vsi->mac_num--;
5708
5709         ret = I40E_SUCCESS;
5710 DONE:
5711         rte_free(mv_f);
5712         return ret;
5713 }
5714
5715 /* Configure hash enable flags for RSS */
5716 uint64_t
5717 i40e_config_hena(uint64_t flags)
5718 {
5719         uint64_t hena = 0;
5720
5721         if (!flags)
5722                 return hena;
5723
5724         if (flags & ETH_RSS_FRAG_IPV4)
5725                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
5726         if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
5727                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
5728         if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
5729                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
5730         if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
5731                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
5732         if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
5733                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
5734         if (flags & ETH_RSS_FRAG_IPV6)
5735                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
5736         if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
5737                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
5738         if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
5739                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
5740         if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
5741                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
5742         if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
5743                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
5744         if (flags & ETH_RSS_L2_PAYLOAD)
5745                 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
5746
5747         return hena;
5748 }
5749
5750 /* Parse the hash enable flags */
5751 uint64_t
5752 i40e_parse_hena(uint64_t flags)
5753 {
5754         uint64_t rss_hf = 0;
5755
5756         if (!flags)
5757                 return rss_hf;
5758         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
5759                 rss_hf |= ETH_RSS_FRAG_IPV4;
5760         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
5761                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
5762         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
5763                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
5764         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
5765                 rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
5766         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
5767                 rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
5768         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
5769                 rss_hf |= ETH_RSS_FRAG_IPV6;
5770         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
5771                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
5772         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
5773                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
5774         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
5775                 rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
5776         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
5777                 rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
5778         if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
5779                 rss_hf |= ETH_RSS_L2_PAYLOAD;
5780
5781         return rss_hf;
5782 }
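
/*
 * i40e_config_hena() and i40e_parse_hena() are inverse mappings between
 * the ethdev rss_hf flag space and the hardware HENA PCTYPE bitmap, so
 * a round trip over supported flags is lossless:
 *
 *     uint64_t hena = i40e_config_hena(ETH_RSS_NONFRAG_IPV4_TCP |
 *                                      ETH_RSS_NONFRAG_IPV4_UDP);
 *     uint64_t rss_hf = i40e_parse_hena(hena);
 *
 *     assert(rss_hf == (ETH_RSS_NONFRAG_IPV4_TCP |
 *                       ETH_RSS_NONFRAG_IPV4_UDP));
 */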
5783
5784 /* Disable RSS */
5785 static void
5786 i40e_pf_disable_rss(struct i40e_pf *pf)
5787 {
5788         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5789         uint64_t hena;
5790
5791         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
5792         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
5793         hena &= ~I40E_RSS_HENA_ALL;
5794         I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
5795         I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
5796         I40E_WRITE_FLUSH(hw);
5797 }
5798
5799 static int
5800 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
5801 {
5802         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
5803         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5804         int ret = 0;
5805
5806         if (!key || key_len == 0) {
5807                 PMD_DRV_LOG(DEBUG, "No key to be configured");
5808                 return 0;
5809         } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
5810                 sizeof(uint32_t)) {
5811                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
5812                 return -EINVAL;
5813         }
5814
5815         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
5816                 struct i40e_aqc_get_set_rss_key_data *key_dw =
5817                         (struct i40e_aqc_get_set_rss_key_data *)key;
5818
5819                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
5820                 if (ret)
5821                         PMD_INIT_LOG(ERR, "Failed to configure RSS key "
5822                                      "via AQ");
5823         } else {
5824                 uint32_t *hash_key = (uint32_t *)key;
5825                 uint16_t i;
5826
5827                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
5828                         I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
5829                 I40E_WRITE_FLUSH(hw);
5830         }
5831
5832         return ret;
5833 }
5834
5835 static int
5836 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
5837 {
5838         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
5839         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5840         int ret;
5841
5842         if (!key || !key_len)
5843                 return -EINVAL;
5844
5845         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
5846                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
5847                         (struct i40e_aqc_get_set_rss_key_data *)key);
5848                 if (ret) {
5849                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
5850                         return ret;
5851                 }
5852         } else {
5853                 uint32_t *key_dw = (uint32_t *)key;
5854                 uint16_t i;
5855
5856                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
5857                         key_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
5858         }
5859         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
5860
5861         return 0;
5862 }
5863
5864 static int
5865 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
5866 {
5867         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5868         uint64_t rss_hf;
5869         uint64_t hena;
5870         int ret;
5871
5872         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
5873                                rss_conf->rss_key_len);
5874         if (ret)
5875                 return ret;
5876
5877         rss_hf = rss_conf->rss_hf;
5878         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
5879         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
5880         hena &= ~I40E_RSS_HENA_ALL;
5881         hena |= i40e_config_hena(rss_hf);
5882         I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
5883         I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
5884         I40E_WRITE_FLUSH(hw);
5885
5886         return 0;
5887 }
5888
5889 static int
5890 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
5891                          struct rte_eth_rss_conf *rss_conf)
5892 {
5893         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5894         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5895         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
5896         uint64_t hena;
5897
5898         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
5899         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
5900         if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
5901                 if (rss_hf != 0) /* Enable RSS */
5902                         return -EINVAL;
5903                 return 0; /* Nothing to do */
5904         }
5905         /* RSS enabled */
5906         if (rss_hf == 0) /* Disable RSS */
5907                 return -EINVAL;
5908
5909         return i40e_hw_rss_hash_set(pf, rss_conf);
5910 }
5911
5912 static int
5913 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
5914                            struct rte_eth_rss_conf *rss_conf)
5915 {
5916         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5917         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5918         uint64_t hena;
5919
5920         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
5921                          &rss_conf->rss_key_len);
5922
5923         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
5924         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
5925         rss_conf->rss_hf = i40e_parse_hena(hena);
5926
5927         return 0;
5928 }
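
/*
 * Application-level sketch of exercising the two RSS hash callbacks
 * above through the ethdev API (assumes port 0 is bound to this PMD;
 * with I40E_PFQF_HKEY_MAX_INDEX == 12 the key is (12 + 1) * 4 = 52
 * bytes):
 *
 *     uint8_t key[52];
 *     struct rte_eth_rss_conf conf = {
 *             .rss_key = key,
 *             .rss_key_len = sizeof(key),
 *     };
 *
 *     rte_eth_dev_rss_hash_conf_get(0, &conf);
 *     conf.rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
 *     rte_eth_dev_rss_hash_update(0, &conf);
 */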
5929
5930 static int
5931 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
5932 {
5933         switch (filter_type) {
5934         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
5935                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
5936                 break;
5937         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
5938                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
5939                 break;
5940         case RTE_TUNNEL_FILTER_IMAC_TENID:
5941                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
5942                 break;
5943         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
5944                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
5945                 break;
5946         case ETH_TUNNEL_FILTER_IMAC:
5947                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
5948                 break;
5949         case ETH_TUNNEL_FILTER_OIP:
5950                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
5951                 break;
5952         case ETH_TUNNEL_FILTER_IIP:
5953                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
5954                 break;
5955         default:
5956                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
5957                 return -EINVAL;
5958         }
5959
5960         return 0;
5961 }
5962
5963 static int
5964 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
5965                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
5966                         uint8_t add)
5967 {
5968         uint16_t ip_type;
5969         uint8_t i, tun_type = 0;
5970         /* internal variable to convert IPv6 byte order */
5971         uint32_t convert_ipv6[4];
5972         int val, ret = 0;
5973         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5974         struct i40e_vsi *vsi = pf->main_vsi;
5975         struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
5976         struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
5977
5978         cld_filter = rte_zmalloc("tunnel_filter",
5979                 sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
5980                 0);
5981
5982         if (cld_filter == NULL) {
5983                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
5984                 return -EINVAL;
5985         }
5986         pfilter = cld_filter;
5987
5988         ether_addr_copy(&tunnel_filter->outer_mac, (struct ether_addr *)&pfilter->outer_mac);
5989         ether_addr_copy(&tunnel_filter->inner_mac, (struct ether_addr *)&pfilter->inner_mac);
5990
5991         pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan);
5992         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
5993                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
5994                 rte_memcpy(&pfilter->ipaddr.v4.data,
5995                                 &rte_cpu_to_le_32(tunnel_filter->ip_addr.ipv4_addr),
5996                                 sizeof(pfilter->ipaddr.v4.data));
5997         } else {
5998                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
5999                 for (i = 0; i < 4; i++) {
6000                         convert_ipv6[i] =
6001                         rte_cpu_to_le_32(tunnel_filter->ip_addr.ipv6_addr[i]);
6002                 }
6003                 rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6,
6004                                 sizeof(pfilter->ipaddr.v6.data));
6005         }
6006
6007         /* check tunnel type */
6008         switch (tunnel_filter->tunnel_type) {
6009         case RTE_TUNNEL_TYPE_VXLAN:
6010                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
6011                 break;
6012         case RTE_TUNNEL_TYPE_NVGRE:
6013                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
6014                 break;
6015         case RTE_TUNNEL_TYPE_IP_IN_GRE:
6016                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
6017                 break;
6018         default:
6019                 /* Other tunnel types are not supported. */
6020                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
6021                 rte_free(cld_filter);
6022                 return -EINVAL;
6023         }
6024
6025         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
6026                                                 &pfilter->flags);
6027         if (val < 0) {
6028                 rte_free(cld_filter);
6029                 return -EINVAL;
6030         }
6031
6032         pfilter->flags |= rte_cpu_to_le_16(
6033                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
6034                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
6035         pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
6036         pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
6037
6038         if (add)
6039                 ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
6040         else
6041                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
6042                                                 cld_filter, 1);
6043
6044         rte_free(cld_filter);
6045         return ret;
6046 }
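
/*
 * Application-side sketch of adding a VXLAN cloud filter through the
 * path above (hypothetical values; inner MAC + inner VLAN match steered
 * to queue 2 of port 0; conf.inner_mac must also be set to a non-zero
 * address to pass i40e_tunnel_filter_param_check()):
 *
 *     struct rte_eth_tunnel_filter_conf conf;
 *
 *     memset(&conf, 0, sizeof(conf));
 *     conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
 *     conf.filter_type = RTE_TUNNEL_FILTER_IMAC_IVLAN;
 *     conf.inner_vlan = 100;
 *     conf.tenant_id = 1000;
 *     conf.queue_id = 2;
 *     rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_TUNNEL,
 *                             RTE_ETH_FILTER_ADD, &conf);
 */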
6047
6048 static int
6049 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
6050 {
6051         uint8_t i;
6052
6053         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6054                 if (pf->vxlan_ports[i] == port)
6055                         return i;
6056         }
6057
6058         return -1;
6059 }
6060
6061 static int
6062 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
6063 {
6064         int  idx, ret;
6065         uint8_t filter_idx;
6066         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6067
6068         idx = i40e_get_vxlan_port_idx(pf, port);
6069
6070         /* Check if port already exists */
6071         if (idx >= 0) {
6072                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
6073                 return -EINVAL;
6074         }
6075
6076         /* Now check if there is space to add the new port */
6077         idx = i40e_get_vxlan_port_idx(pf, 0);
6078         if (idx < 0) {
6079                 PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached, "
6080                         "not adding port %d", port);
6081                 return -ENOSPC;
6082         }
6083
6084         ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
6085                                         &filter_idx, NULL);
6086         if (ret < 0) {
6087                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
6088                 return -1;
6089         }
6090
6091         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
6092                          port, filter_idx);
6093
6094         /* New port: add it and mark its index in the bitmap */
6095         pf->vxlan_ports[idx] = port;
6096         pf->vxlan_bitmap |= (1 << idx);
6097
6098         if (!(pf->flags & I40E_FLAG_VXLAN))
6099                 pf->flags |= I40E_FLAG_VXLAN;
6100
6101         return 0;
6102 }
6103
6104 static int
6105 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
6106 {
6107         int idx;
6108         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6109
6110         if (!(pf->flags & I40E_FLAG_VXLAN)) {
6111                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
6112                 return -EINVAL;
6113         }
6114
6115         idx = i40e_get_vxlan_port_idx(pf, port);
6116
6117         if (idx < 0) {
6118                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
6119                 return -EINVAL;
6120         }
6121
6122         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
6123                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
6124                 return -1;
6125         }
6126
6127         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
6128                         port, idx);
6129
6130         pf->vxlan_ports[idx] = 0;
6131         pf->vxlan_bitmap &= ~(1 << idx);
6132
6133         if (!pf->vxlan_bitmap)
6134                 pf->flags &= ~I40E_FLAG_VXLAN;
6135
6136         return 0;
6137 }
6138
6139 /* Add UDP tunneling port */
6140 static int
6141 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
6142                              struct rte_eth_udp_tunnel *udp_tunnel)
6143 {
6144         int ret = 0;
6145         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6146
6147         if (udp_tunnel == NULL)
6148                 return -EINVAL;
6149
6150         switch (udp_tunnel->prot_type) {
6151         case RTE_TUNNEL_TYPE_VXLAN:
6152                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
6153                 break;
6154
6155         case RTE_TUNNEL_TYPE_GENEVE:
6156         case RTE_TUNNEL_TYPE_TEREDO:
6157                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
6158                 ret = -1;
6159                 break;
6160
6161         default:
6162                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6163                 ret = -1;
6164                 break;
6165         }
6166
6167         return ret;
6168 }
6169
6170 /* Remove UDP tunneling port */
6171 static int
6172 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
6173                              struct rte_eth_udp_tunnel *udp_tunnel)
6174 {
6175         int ret = 0;
6176         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6177
6178         if (udp_tunnel == NULL)
6179                 return -EINVAL;
6180
6181         switch (udp_tunnel->prot_type) {
6182         case RTE_TUNNEL_TYPE_VXLAN:
6183                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
6184                 break;
6185         case RTE_TUNNEL_TYPE_GENEVE:
6186         case RTE_TUNNEL_TYPE_TEREDO:
6187                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
6188                 ret = -1;
6189                 break;
6190         default:
6191                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6192                 ret = -1;
6193                 break;
6194         }
6195
6196         return ret;
6197 }
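
/*
 * Sketch of driving the two callbacks above from an application (4789
 * is the IANA-assigned VXLAN port; assumes port 0 is bound to this
 * PMD):
 *
 *     struct rte_eth_udp_tunnel tunnel = {
 *             .udp_port = 4789,
 *             .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *     };
 *
 *     rte_eth_dev_udp_tunnel_port_add(0, &tunnel);
 *     rte_eth_dev_udp_tunnel_port_delete(0, &tunnel);
 */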
6198
6199 /* Calculate the maximum number of contiguous PF queues that are configured */
6200 static int
6201 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
6202 {
6203         struct rte_eth_dev_data *data = pf->dev_data;
6204         int i, num;
6205         struct i40e_rx_queue *rxq;
6206
6207         num = 0;
6208         for (i = 0; i < pf->lan_nb_qps; i++) {
6209                 rxq = data->rx_queues[i];
6210                 if (rxq && rxq->q_set)
6211                         num++;
6212                 else
6213                         break;
6214         }
6215
6216         return num;
6217 }
6218
6219 /* Configure RSS */
6220 static int
6221 i40e_pf_config_rss(struct i40e_pf *pf)
6222 {
6223         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6224         struct rte_eth_rss_conf rss_conf;
6225         uint32_t i, lut = 0;
6226         uint16_t j, num;
6227
6228         /*
6229          * If both VMDQ and RSS are enabled, not all PF queues are configured.
6230          * It's necessary to calculate the actual number of configured PF queues.
6231          */
6232         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
6233                 num = i40e_pf_calc_configured_queues_num(pf);
6234         else
6235                 num = pf->dev_data->nb_rx_queues;
6236
6237         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
6238         PMD_INIT_LOG(INFO, "%u contiguous PF queues are configured",
6239                         num);
6240
6241         if (num == 0) {
6242                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
6243                 return -ENOTSUP;
6244         }
6245
6246         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
6247                 if (j == num)
6248                         j = 0;
6249                 lut = (lut << 8) | (j & ((0x1 <<
6250                         hw->func_caps.rss_table_entry_width) - 1));
6251                 if ((i & 3) == 3)
6252                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
6253         }
6254
6255         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
6256         if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
6257                 i40e_pf_disable_rss(pf);
6258                 return 0;
6259         }
6260         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
6261                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
6262                 /* Random default keys */
6263                 static uint32_t rss_key_default[] = {0x6b793944,
6264                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
6265                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
6266                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
6267
6268                 rss_conf.rss_key = (uint8_t *)rss_key_default;
6269                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
6270                                                         sizeof(uint32_t);
6271         }
6272
6273         return i40e_hw_rss_hash_set(pf, &rss_conf);
6274 }
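
/*
 * The LUT loop above packs four 8-bit queue indexes per 32-bit
 * register: each entry is shifted in at the low byte and the
 * accumulated word is written to I40E_PFQF_HLUT(i >> 2) on every
 * fourth entry. Worked example for num == 3:
 *
 *     i = 0..3 yields j = 0, 1, 2, 0
 *     lut = 0x00010200  (first entry lands in the most significant byte)
 *
 * and the pattern keeps cycling through the num configured queues for
 * all hw->func_caps.rss_table_size entries.
 */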
6275
6276 static int
6277 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
6278                                struct rte_eth_tunnel_filter_conf *filter)
6279 {
6280         if (pf == NULL || filter == NULL) {
6281                 PMD_DRV_LOG(ERR, "Invalid parameter");
6282                 return -EINVAL;
6283         }
6284
6285         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
6286                 PMD_DRV_LOG(ERR, "Invalid queue ID");
6287                 return -EINVAL;
6288         }
6289
6290         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
6291                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
6292                 return -EINVAL;
6293         }
6294
6295         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
6296                 (is_zero_ether_addr(&filter->outer_mac))) {
6297                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
6298                 return -EINVAL;
6299         }
6300
6301         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
6302                 (is_zero_ether_addr(&filter->inner_mac))) {
6303                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
6304                 return -EINVAL;
6305         }
6306
6307         return 0;
6308 }
6309
6310 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
6311 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
6312 static int
6313 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
6314 {
6315         uint32_t val, reg;
6316         int ret = -EINVAL;
6317
6318         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
6319         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM: 0x%08x", val);
6320
6321         if (len == 3) {
6322                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
6323         } else if (len == 4) {
6324                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
6325         } else {
6326                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
6327                 return ret;
6328         }
6329
6330         if (reg != val) {
6331                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
6332                                                    reg, NULL);
6333                 if (ret != 0)
6334                         return ret;
6335         } else {
6336                 ret = 0;
6337         }
6338         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM: 0x%08x",
6339                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
6340
6341         return ret;
6342 }
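
/*
 * Application-side sketch of selecting a 3-byte GRE key via the
 * global-config path below (in this driver the request is dispatched
 * from filter_ctrl with filter type RTE_ETH_FILTER_NONE; assumes port 0
 * is bound to this PMD):
 *
 *     struct rte_eth_global_cfg cfg = {
 *             .cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN,
 *             .cfg = { .gre_key_len = 3 },
 *     };
 *
 *     rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_NONE,
 *                             RTE_ETH_FILTER_SET, &cfg);
 */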
6343
6344 static int
6345 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
6346 {
6347         int ret = -EINVAL;
6348
6349         if (!hw || !cfg)
6350                 return -EINVAL;
6351
6352         switch (cfg->cfg_type) {
6353         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
6354                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
6355                 break;
6356         default:
6357                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
6358                 break;
6359         }
6360
6361         return ret;
6362 }
6363
6364 static int
6365 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
6366                                enum rte_filter_op filter_op,
6367                                void *arg)
6368 {
6369         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6370         int ret = I40E_ERR_PARAM;
6371
6372         switch (filter_op) {
6373         case RTE_ETH_FILTER_SET:
6374                 ret = i40e_dev_global_config_set(hw,
6375                         (struct rte_eth_global_cfg *)arg);
6376                 break;
6377         default:
6378                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
6379                 break;
6380         }
6381
6382         return ret;
6383 }
6384
6385 static int
6386 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
6387                           enum rte_filter_op filter_op,
6388                           void *arg)
6389 {
6390         struct rte_eth_tunnel_filter_conf *filter;
6391         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6392         int ret = I40E_SUCCESS;
6393
6394         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
6395
6396         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
6397                 return I40E_ERR_PARAM;
6398
6399         switch (filter_op) {
6400         case RTE_ETH_FILTER_NOP:
6401                 if (!(pf->flags & I40E_FLAG_VXLAN))
6402                         ret = I40E_NOT_SUPPORTED;
6403                 break;
6404         case RTE_ETH_FILTER_ADD:
6405                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
6406                 break;
6407         case RTE_ETH_FILTER_DELETE:
6408                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
6409                 break;
6410         default:
6411                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
6412                 ret = I40E_ERR_PARAM;
6413                 break;
6414         }
6415
6416         return ret;
6417 }
6418
6419 static int
6420 i40e_pf_config_mq_rx(struct i40e_pf *pf)
6421 {
6422         int ret = 0;
6423         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
6424
6425         /* RSS setup */
6426         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
6427                 ret = i40e_pf_config_rss(pf);
6428         else
6429                 i40e_pf_disable_rss(pf);
6430
6431         return ret;
6432 }
6433
6434 /* Get the symmetric hash enable configurations per port */
6435 static void
6436 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
6437 {
6438         uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
6439
6440         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
6441 }
6442
6443 /* Set the symmetric hash enable configurations per port */
6444 static void
6445 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
6446 {
6447         uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
6448
6449         if (enable > 0) {
6450                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
6451                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
6452                                                         "been enabled");
6453                         return;
6454                 }
6455                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
6456         } else {
6457                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
6458                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
6459                                                         "been disabled");
6460                         return;
6461                 }
6462                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
6463         }
6464         I40E_WRITE_REG(hw, I40E_PRTQF_CTL_0, reg);
6465         I40E_WRITE_FLUSH(hw);
6466 }
6467
6468 /*
6469  * Get global configurations of hash function type and symmetric hash enable
6470  * per flow type (pctype). Note that the global configuration affects all
6471  * the ports on the same NIC.
6472  */
6473 static int
6474 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
6475                                    struct rte_eth_hash_global_conf *g_cfg)
6476 {
6477         uint32_t reg, mask = I40E_FLOW_TYPES;
6478         uint16_t i;
6479         enum i40e_filter_pctype pctype;
6480
6481         memset(g_cfg, 0, sizeof(*g_cfg));
6482         reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
6483         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
6484                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
6485         else
6486                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
6487         PMD_DRV_LOG(DEBUG, "Hash function is %s",
6488                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
6489
6490         for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
6491                 if (!(mask & (1UL << i)))
6492                         continue;
6493                 mask &= ~(1UL << i);
6494                 /* A set bit indicates the corresponding flow type is supported */
6495                 g_cfg->valid_bit_mask[0] |= (1UL << i);
6496                 pctype = i40e_flowtype_to_pctype(i);
6497                 reg = I40E_READ_REG(hw, I40E_GLQF_HSYM(pctype));
6498                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
6499                         g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
6500         }
6501
6502         return 0;
6503 }
6504
6505 static int
6506 i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
6507 {
6508         uint32_t i;
6509         uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
6510
6511         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
6512                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
6513                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
6514                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
6515                                                 g_cfg->hash_func);
6516                 return -EINVAL;
6517         }
6518
6519         /*
6520          * As i40e supports fewer than 32 flow types, only the first 32 bits need to
6521          * be checked.
6522          */
6523         mask0 = g_cfg->valid_bit_mask[0];
6524         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
6525                 if (i == 0) {
6526                         /* Check if any unsupported flow type is configured */
6527                         if ((mask0 | i40e_mask) ^ i40e_mask)
6528                                 goto mask_err;
6529                 } else {
6530                         if (g_cfg->valid_bit_mask[i])
6531                                 goto mask_err;
6532                 }
6533         }
6534
6535         return 0;
6536
6537 mask_err:
6538         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
6539
6540         return -EINVAL;
6541 }
6542
6543 /*
6544  * Set global configurations of hash function type and symmetric hash enable
6545  * per flow type (pctype). Note that modifying the global configuration will
6546  * affect all the ports on the same NIC.
6547  */
6548 static int
6549 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
6550                                    struct rte_eth_hash_global_conf *g_cfg)
6551 {
6552         int ret;
6553         uint16_t i;
6554         uint32_t reg;
6555         uint32_t mask0 = g_cfg->valid_bit_mask[0];
6556         enum i40e_filter_pctype pctype;
6557
6558         /* Check the input parameters */
6559         ret = i40e_hash_global_config_check(g_cfg);
6560         if (ret < 0)
6561                 return ret;
6562
6563         for (i = 0; mask0 && i < UINT32_BIT; i++) {
6564                 if (!(mask0 & (1UL << i)))
6565                         continue;
6566                 mask0 &= ~(1UL << i);
6567                 pctype = i40e_flowtype_to_pctype(i);
6568                 reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
6569                                 I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
6570                 I40E_WRITE_REG(hw, I40E_GLQF_HSYM(pctype), reg);
6571         }
6572
6573         reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
6574         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
6575                 /* Toeplitz */
6576                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
6577                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
6578                                                                 "Toeplitz");
6579                         goto out;
6580                 }
6581                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
6582         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
6583                 /* Simple XOR */
6584                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
6585                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
6586                                                         "Simple XOR");
6587                         goto out;
6588                 }
6589                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
6590         } else
6591                 /* Use the default, and keep it as it is */
6592                 goto out;
6593
6594         I40E_WRITE_REG(hw, I40E_GLQF_CTL, reg);
6595
6596 out:
6597         I40E_WRITE_FLUSH(hw);
6598
6599         return 0;
6600 }
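
/*
 * Application-side sketch of the symmetric-hash global configuration
 * set above (enables symmetric Toeplitz hashing for IPv4/TCP on all
 * ports of the NIC behind port 0; a sketch using the hash filter API of
 * this DPDK release):
 *
 *     struct rte_eth_hash_filter_info info;
 *
 *     memset(&info, 0, sizeof(info));
 *     info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
 *     info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
 *     info.info.global_conf.valid_bit_mask[0] =
 *             1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 *     info.info.global_conf.sym_hash_enable_mask[0] =
 *             1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 *     rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_HASH,
 *                             RTE_ETH_FILTER_SET, &info);
 */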
6601
6602 /**
6603  * Valid input sets for hash and flow director filters per PCTYPE
6604  */
6605 static uint64_t
6606 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
6607                 enum rte_filter_type filter)
6608 {
6609         uint64_t valid;
6610
6611         static const uint64_t valid_hash_inset_table[] = {
6612                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
6613                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6614                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6615                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
6616                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
6617                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6618                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6619                         I40E_INSET_FLEX_PAYLOAD,
6620                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
6621                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6622                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6623                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
6624                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6625                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6626                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6627                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6628                         I40E_INSET_FLEX_PAYLOAD,
6629                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
6630                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6631                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6632                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
6633                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6634                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6635                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6636                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6637                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
6638                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
6639                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6640                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6641                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
6642                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6643                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6644                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6645                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6646                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
6647                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
6648                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6649                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6650                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
6651                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6652                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6653                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6654                         I40E_INSET_FLEX_PAYLOAD,
6655                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
6656                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6657                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6658                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6659                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6660                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
6661                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
6662                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
6663                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
6664                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6665                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6666                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6667                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6668                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
6669                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
6670                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
6671                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
6672                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6673                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6674                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6675                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6676                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
6677                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
6678                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
6679                         I40E_INSET_FLEX_PAYLOAD,
6680                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
6681                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6682                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6683                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6684                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6685                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
6686                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
6687                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
6688                         I40E_INSET_FLEX_PAYLOAD,
6689                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
6690                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6691                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6692                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6693                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6694                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
6695                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
6696                         I40E_INSET_FLEX_PAYLOAD,
6697                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
6698                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6699                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6700                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
6701                         I40E_INSET_FLEX_PAYLOAD,
6702         };
6703
6704         /**
6705          * Flow director supports only fields defined in
6706          * union rte_eth_fdir_flow.
6707          */
6708         static const uint64_t valid_fdir_inset_table[] = {
6709                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
6710                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6711                 I40E_INSET_FLEX_PAYLOAD,
6712                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
6713                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6714                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6715                 I40E_INSET_FLEX_PAYLOAD,
6716                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
6717                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6718                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6719                 I40E_INSET_FLEX_PAYLOAD,
6720                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
6721                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6722                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6723                 I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
6724                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
6725                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6726                 I40E_INSET_FLEX_PAYLOAD,
6727                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
6728                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6729                 I40E_INSET_FLEX_PAYLOAD,
6730                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
6731                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6732                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6733                 I40E_INSET_FLEX_PAYLOAD,
6734                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
6735                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6736                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6737                 I40E_INSET_FLEX_PAYLOAD,
6738                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
6739                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6740                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6741                 I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
6742                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
6743                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6744                 I40E_INSET_FLEX_PAYLOAD,
6745                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
6746                 I40E_INSET_LAST_ETHER_TYPE | I40E_INSET_FLEX_PAYLOAD,
6747         };
6748
6749         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
6750                 return 0;
6751         if (filter == RTE_ETH_FILTER_HASH)
6752                 valid = valid_hash_inset_table[pctype];
6753         else
6754                 valid = valid_fdir_inset_table[pctype];
6755
6756         return valid;
6757 }
6758
6759 /**
6760  * Validate if the input set is allowed for a specific PCTYPE
6761  */
6762 static int
6763 i40e_validate_input_set(enum i40e_filter_pctype pctype,
6764                 enum rte_filter_type filter, uint64_t inset)
6765 {
6766         uint64_t valid;
6767
6768         valid = i40e_get_valid_input_set(pctype, filter);
6769         if (inset & (~valid))
6770                 return -EINVAL;
6771
6772         return 0;
6773 }
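
/*
 * Illustrative sketch only, not called by the driver: check that a hash
 * filter on the IPv4/UDP PCTYPE may match on the classic 4-tuple. The
 * PCTYPE and the field choice are assumptions made for the example.
 */
static inline void
i40e_validate_input_set_example(void)
{
        uint64_t inset = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT;

        if (i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
                                    RTE_ETH_FILTER_HASH, inset) == 0)
                PMD_DRV_LOG(DEBUG, "4-tuple is a valid IPv4/UDP hash inset");
}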
6774
6775 /* default input set fields combination per pctype */
6776 static uint64_t
6777 i40e_get_default_input_set(uint16_t pctype)
6778 {
6779         static const uint64_t default_inset_table[] = {
6780                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
6781                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
6782                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
6783                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6784                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
6785                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
6786                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6787                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
6788                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
6789                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6790                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6791                         I40E_INSET_SCTP_VT,
6792                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
6793                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
6794                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
6795                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
6796                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
6797                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6798                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
6799                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
6800                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6801                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
6802                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
6803                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6804                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6805                         I40E_INSET_SCTP_VT,
6806                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
6807                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
6808                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
6809                         I40E_INSET_LAST_ETHER_TYPE,
6810         };
6811
6812         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
6813                 return 0;
6814
6815         return default_inset_table[pctype];
6816 }
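
/*
 * Worked example, derived from the table above: for the IPv4/UDP PCTYPE
 * the default input set is the classic 4-tuple, i.e.
 *
 *      i40e_get_default_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) ==
 *              (I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 *               I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)
 */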
6817
6818 /**
6819  * Parse the input set from index to logical bit masks
6820  */
6821 static int
6822 i40e_parse_input_set(uint64_t *inset,
6823                      enum i40e_filter_pctype pctype,
6824                      enum rte_eth_input_set_field *field,
6825                      uint16_t size)
6826 {
6827         uint16_t i, j;
6828         int ret = -EINVAL;
6829
6830         static const struct {
6831                 enum rte_eth_input_set_field field;
6832                 uint64_t inset;
6833         } inset_convert_table[] = {
6834                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
6835                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
6836                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
6837                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
6838                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
6839                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
6840                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
6841                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
6842                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
6843                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
6844                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
6845                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
6846                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
6847                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
6848                         I40E_INSET_IPV6_NEXT_HDR},
6849                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
6850                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
6851                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
6852                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
6853                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
6854                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
6855                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
6856                         I40E_INSET_SCTP_VT},
6857                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
6858                         I40E_INSET_TUNNEL_DMAC},
6859                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
6860                         I40E_INSET_VLAN_TUNNEL},
6861                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
6862                         I40E_INSET_TUNNEL_ID},
6863                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
6864                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
6865                         I40E_INSET_FLEX_PAYLOAD_W1},
6866                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
6867                         I40E_INSET_FLEX_PAYLOAD_W2},
6868                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
6869                         I40E_INSET_FLEX_PAYLOAD_W3},
6870                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
6871                         I40E_INSET_FLEX_PAYLOAD_W4},
6872                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
6873                         I40E_INSET_FLEX_PAYLOAD_W5},
6874                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
6875                         I40E_INSET_FLEX_PAYLOAD_W6},
6876                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
6877                         I40E_INSET_FLEX_PAYLOAD_W7},
6878                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
6879                         I40E_INSET_FLEX_PAYLOAD_W8},
6880         };
6881
6882         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
6883                 return ret;
6884
        /* Only one item is allowed for the default or empty input set */
6886         if (size == 1) {
6887                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
6888                         *inset = i40e_get_default_input_set(pctype);
6889                         return 0;
6890                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
6891                         *inset = I40E_INSET_NONE;
6892                         return 0;
6893                 }
6894         }
6895
6896         for (i = 0, *inset = 0; i < size; i++) {
6897                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
6898                         if (field[i] == inset_convert_table[j].field) {
6899                                 *inset |= inset_convert_table[j].inset;
6900                                 break;
6901                         }
6902                 }
6903
                /* Unsupported input set field encountered, return immediately */
6905                 if (j == RTE_DIM(inset_convert_table))
6906                         return ret;
6907         }
6908
6909         return 0;
6910 }
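
/*
 * Illustrative sketch only, not called by the driver: convert a generic
 * ethdev field list for IPv4/TCP into the logical inset bits. The field
 * list below is an assumption chosen for the example.
 */
static inline int
i40e_parse_input_set_example(uint64_t *inset)
{
        enum rte_eth_input_set_field fields[] = {
                RTE_ETH_INPUT_SET_L3_SRC_IP4,
                RTE_ETH_INPUT_SET_L3_DST_IP4,
                RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT,
                RTE_ETH_INPUT_SET_L4_TCP_DST_PORT,
        };

        /* On success, *inset == I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
         *                       I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT
         */
        return i40e_parse_input_set(inset, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
                                    fields, RTE_DIM(fields));
}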
6911
6912 /**
6913  * Translate the input set from bit masks to register aware bit masks
6914  * and vice versa
6915  */
6916 static uint64_t
6917 i40e_translate_input_set_reg(uint64_t input)
6918 {
6919         uint64_t val = 0;
6920         uint16_t i;
6921
6922         static const struct {
6923                 uint64_t inset;
6924                 uint64_t inset_reg;
6925         } inset_map[] = {
6926                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
6927                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
6928                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
6929                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
6930                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
6931                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
6932                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
6933                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
6934                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
6935                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
6936                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
6937                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
6938                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
6939                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
6940                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
6941                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
6942                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
6943                 {I40E_INSET_TUNNEL_DMAC,
6944                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
6945                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
6946                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
6947                 {I40E_INSET_TUNNEL_SRC_PORT,
6948                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
6949                 {I40E_INSET_TUNNEL_DST_PORT,
6950                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
6952                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
6953                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
6954                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
6955                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
6956                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
6957                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
6958                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
6959                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
6960         };
6961
6962         if (input == 0)
6963                 return val;
6964
6965         /* Translate input set to register aware inset */
6966         for (i = 0; i < RTE_DIM(inset_map); i++) {
6967                 if (input & inset_map[i].inset)
6968                         val |= inset_map[i].inset_reg;
6969         }
6970
6971         return val;
6972 }
6973
6974 static uint8_t
6975 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
6976 {
6977         uint8_t i, idx = 0;
6978
6979         static const struct {
6980                 uint64_t inset;
6981                 uint32_t mask;
6982         } inset_mask_map[] = {
6983                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
6984                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
6985                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
6986                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
6987         };
6988
        if (!mask || !nb_elem)
                return 0;

        /* An empty input set clears every mask register; keep the !inset
         * test out of the guard above so that this branch is reachable.
         */
        if (!inset && nb_elem >= I40E_INSET_MASK_NUM_REG) {
                for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++)
                        mask[i] = 0;
                return I40E_INSET_MASK_NUM_REG;
        }
6997
6998         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
6999                 if (idx >= nb_elem)
7000                         break;
7001                 if (inset & inset_mask_map[i].inset) {
7002                         mask[idx] = inset_mask_map[i].mask;
7003                         idx++;
7004                 }
7005         }
7006
7007         return idx;
7008 }
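
/*
 * Illustrative sketch only, not called by the driver: the two helpers
 * above are used together, as i40e_filter_inset_select() does, to turn
 * a logical inset into a register-aware inset plus its mask registers.
 */
static inline void
i40e_inset_to_reg_example(void)
{
        uint64_t inset = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_TOS;
        uint32_t mask[I40E_INSET_MASK_NUM_REG];
        uint64_t inset_reg;
        uint8_t num;

        inset_reg = i40e_translate_input_set_reg(inset);
        num = i40e_generate_inset_mask_reg(inset, mask, RTE_DIM(mask));
        /* num == 1 here: of the two fields, only IPV4_TOS has a mask */
        RTE_SET_USED(inset_reg);
        RTE_SET_USED(num);
}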
7009
7010 static uint64_t
7011 i40e_get_reg_inset(struct i40e_hw *hw, enum rte_filter_type filter,
7012                             enum i40e_filter_pctype pctype)
7013 {
7014         uint64_t reg = 0;
7015
7016         if (filter == RTE_ETH_FILTER_HASH) {
7017                 reg = I40E_READ_REG(hw, I40E_GLQF_HASH_INSET(1, pctype));
7018                 reg <<= I40E_32_BIT_WIDTH;
7019                 reg |= I40E_READ_REG(hw, I40E_GLQF_HASH_INSET(0, pctype));
7020         } else if (filter == RTE_ETH_FILTER_FDIR) {
7021                 reg = I40E_READ_REG(hw, I40E_PRTQF_FD_INSET(pctype, 1));
7022                 reg <<= I40E_32_BIT_WIDTH;
7023                 reg |= I40E_READ_REG(hw, I40E_PRTQF_FD_INSET(pctype, 0));
7024         }
7025
7026         return reg;
7027 }
7028
7029 static void
7030 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
7031 {
7032         uint32_t reg = I40E_READ_REG(hw, addr);
7033
        PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
        if (reg != val)
                I40E_WRITE_REG(hw, addr, val);
        PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
                    (uint32_t)I40E_READ_REG(hw, addr));
7039 }
7040
7041 static int
7042 i40e_set_hash_inset_mask(struct i40e_hw *hw,
7043                          enum i40e_filter_pctype pctype,
7044                          enum rte_filter_input_set_op op,
7045                          uint32_t *mask_reg,
7046                          uint8_t num)
7047 {
7048         uint32_t reg;
7049         uint8_t i;
7050
        if (!mask_reg || num > I40E_INSET_MASK_NUM_REG)
                return -EINVAL;
7053
7054         if (op == RTE_ETH_INPUT_SET_SELECT) {
7055                 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
7056                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
7057                                              0);
7058                         if (i >= num)
7059                                 continue;
7060                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
7061                                              mask_reg[i]);
7062                 }
7063         } else if (op == RTE_ETH_INPUT_SET_ADD) {
7064                 uint8_t j, count = 0;
7065
7066                 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
7067                         reg = I40E_READ_REG(hw, I40E_GLQF_HASH_MSK(i, pctype));
7068                         if (reg & I40E_GLQF_HASH_MSK_FIELD)
7069                                 count++;
7070                 }
7071                 if (count + num > I40E_INSET_MASK_NUM_REG)
7072                         return -EINVAL;
7073
                for (i = count, j = 0; i < count + num; i++, j++)
                        i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
                                             mask_reg[j]);
7077         }
7078
7079         return 0;
7080 }
7081
7082 static int
7083 i40e_set_fd_inset_mask(struct i40e_hw *hw,
7084                        enum i40e_filter_pctype pctype,
7085                        enum rte_filter_input_set_op op,
7086                        uint32_t *mask_reg,
7087                        uint8_t num)
7088 {
7089         uint32_t reg;
7090         uint8_t i;
7091
        if (!mask_reg || num > I40E_INSET_MASK_NUM_REG)
                return -EINVAL;
7094
7095         if (op == RTE_ETH_INPUT_SET_SELECT) {
7096                 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
7097                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
7098                                              0);
7099                         if (i >= num)
7100                                 continue;
7101                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
7102                                              mask_reg[i]);
7103                 }
7104         } else if (op == RTE_ETH_INPUT_SET_ADD) {
7105                 uint8_t j, count = 0;
7106
7107                 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
7108                         reg = I40E_READ_REG(hw, I40E_GLQF_FD_MSK(i, pctype));
7109                         if (reg & I40E_GLQF_FD_MSK_FIELD)
7110                                 count++;
7111                 }
7112                 if (count + num > I40E_INSET_MASK_NUM_REG)
7113                         return -EINVAL;
7114
                for (i = count, j = 0; i < count + num; i++, j++)
                        i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
                                             mask_reg[j]);
7118         }
7119
7120         return 0;
7121 }
7122
7123 int
7124 i40e_filter_inset_select(struct i40e_hw *hw,
7125                          struct rte_eth_input_set_conf *conf,
7126                          enum rte_filter_type filter)
7127 {
7128         enum i40e_filter_pctype pctype;
7129         uint64_t inset_reg = 0, input_set;
7130         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG];
7131         uint8_t num;
7132         int ret;
7133
7134         if (!hw || !conf) {
7135                 PMD_DRV_LOG(ERR, "Invalid pointer");
7136                 return -EFAULT;
7137         }
7138
7139         pctype = i40e_flowtype_to_pctype(conf->flow_type);
7140         if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
7141                 PMD_DRV_LOG(ERR, "Not supported flow type (%u)",
7142                             conf->flow_type);
7143                 return -EINVAL;
7144         }
7145         if (filter != RTE_ETH_FILTER_HASH && filter != RTE_ETH_FILTER_FDIR) {
7146                 PMD_DRV_LOG(ERR, "Not supported filter type (%u)", filter);
7147                 return -EINVAL;
7148         }
7149
7150         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
7151                                    conf->inset_size);
7152         if (ret) {
7153                 PMD_DRV_LOG(ERR, "Failed to parse input set");
7154                 return -EINVAL;
7155         }
7156         if (i40e_validate_input_set(pctype, filter, input_set) != 0) {
7157                 PMD_DRV_LOG(ERR, "Invalid input set");
7158                 return -EINVAL;
7159         }
7160
7161         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
7162                 inset_reg |= i40e_get_reg_inset(hw, filter, pctype);
7163         } else if (conf->op != RTE_ETH_INPUT_SET_SELECT) {
7164                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
7165                 return -EINVAL;
7166         }
7167         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
7168                                            I40E_INSET_MASK_NUM_REG);
7169         inset_reg |= i40e_translate_input_set_reg(input_set);
7170
7171         if (filter == RTE_ETH_FILTER_HASH) {
7172                 ret = i40e_set_hash_inset_mask(hw, pctype, conf->op, mask_reg,
7173                                                num);
7174                 if (ret)
7175                         return -EINVAL;
7176
7177                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
7178                                       (uint32_t)(inset_reg & UINT32_MAX));
7179                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
7180                                      (uint32_t)((inset_reg >>
7181                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
7182         } else if (filter == RTE_ETH_FILTER_FDIR) {
7183                 ret = i40e_set_fd_inset_mask(hw, pctype, conf->op, mask_reg,
7184                                              num);
7185                 if (ret)
7186                         return -EINVAL;
7187
7188                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
7189                                       (uint32_t)(inset_reg & UINT32_MAX));
7190                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
7191                                      (uint32_t)((inset_reg >>
7192                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
7193         } else {
7194                 PMD_DRV_LOG(ERR, "Not supported filter type (%u)", filter);
7195                 return -EINVAL;
7196         }
7197         I40E_WRITE_FLUSH(hw);
7198
7199         return 0;
7200 }
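
/*
 * Illustrative sketch only, not called by the driver: program the flow
 * director input set for IPv4/UDP back to its default. The flow type
 * and the operation are assumptions chosen for the example.
 */
static inline int
i40e_filter_inset_select_example(struct i40e_hw *hw)
{
        struct rte_eth_input_set_conf conf = {
                .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
                .inset_size = 1,
                .field = { RTE_ETH_INPUT_SET_DEFAULT },
                .op = RTE_ETH_INPUT_SET_SELECT,
        };

        return i40e_filter_inset_select(hw, &conf, RTE_ETH_FILTER_FDIR);
}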
7201
7202 static int
7203 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
7204 {
7205         int ret = 0;
7206
7207         if (!hw || !info) {
7208                 PMD_DRV_LOG(ERR, "Invalid pointer");
7209                 return -EFAULT;
7210         }
7211
7212         switch (info->info_type) {
7213         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
7214                 i40e_get_symmetric_hash_enable_per_port(hw,
7215                                         &(info->info.enable));
7216                 break;
7217         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
7218                 ret = i40e_get_hash_filter_global_config(hw,
7219                                 &(info->info.global_conf));
7220                 break;
7221         default:
7222                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
7223                                                         info->info_type);
7224                 ret = -EINVAL;
7225                 break;
7226         }
7227
7228         return ret;
7229 }
7230
7231 static int
7232 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
7233 {
7234         int ret = 0;
7235
7236         if (!hw || !info) {
7237                 PMD_DRV_LOG(ERR, "Invalid pointer");
7238                 return -EFAULT;
7239         }
7240
7241         switch (info->info_type) {
7242         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
7243                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
7244                 break;
7245         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
7246                 ret = i40e_set_hash_filter_global_config(hw,
7247                                 &(info->info.global_conf));
7248                 break;
7249         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
7250                 ret = i40e_filter_inset_select(hw,
7251                                                &(info->info.input_set_conf),
7252                                                RTE_ETH_FILTER_HASH);
7253                 break;
7254
7255         default:
7256                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
7257                                                         info->info_type);
7258                 ret = -EINVAL;
7259                 break;
7260         }
7261
7262         return ret;
7263 }
7264
7265 /* Operations for hash function */
7266 static int
7267 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
7268                       enum rte_filter_op filter_op,
7269                       void *arg)
7270 {
7271         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7272         int ret = 0;
7273
7274         switch (filter_op) {
7275         case RTE_ETH_FILTER_NOP:
7276                 break;
7277         case RTE_ETH_FILTER_GET:
7278                 ret = i40e_hash_filter_get(hw,
7279                         (struct rte_eth_hash_filter_info *)arg);
7280                 break;
7281         case RTE_ETH_FILTER_SET:
7282                 ret = i40e_hash_filter_set(hw,
7283                         (struct rte_eth_hash_filter_info *)arg);
7284                 break;
7285         default:
7286                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
7287                                                                 filter_op);
7288                 ret = -ENOTSUP;
7289                 break;
7290         }
7291
7292         return ret;
7293 }
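
/*
 * Application-side sketch, an assumption for illustration rather than
 * driver code: enabling symmetric hashing on a port via the generic
 * filter API ends up in i40e_hash_filter_ctrl() above.
 */
static inline int
i40e_sym_hash_enable_example(uint8_t port_id)
{
        struct rte_eth_hash_filter_info info = {
                .info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT,
                .info.enable = 1,
        };

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}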
7294
/*
 * Configure an ethertype filter, which can direct packets by filtering
 * on MAC address plus ether_type, or on ether_type alone.
 */
7299 static int
7300 i40e_ethertype_filter_set(struct i40e_pf *pf,
7301                         struct rte_eth_ethertype_filter *filter,
7302                         bool add)
7303 {
7304         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7305         struct i40e_control_filter_stats stats;
7306         uint16_t flags = 0;
7307         int ret;
7308
7309         if (filter->queue >= pf->dev_data->nb_rx_queues) {
7310                 PMD_DRV_LOG(ERR, "Invalid queue ID");
7311                 return -EINVAL;
7312         }
7313         if (filter->ether_type == ETHER_TYPE_IPv4 ||
7314                 filter->ether_type == ETHER_TYPE_IPv6) {
7315                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
7316                         " control packet filter.", filter->ether_type);
7317                 return -EINVAL;
7318         }
        if (filter->ether_type == ETHER_TYPE_VLAN)
                PMD_DRV_LOG(WARNING, "filtering on the vlan ether_type of"
                        " the first tag is not supported.");
7322
7323         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
7324                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
7325         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
7326                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
7327         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
7328
7329         memset(&stats, 0, sizeof(stats));
7330         ret = i40e_aq_add_rem_control_packet_filter(hw,
7331                         filter->mac_addr.addr_bytes,
7332                         filter->ether_type, flags,
7333                         pf->main_vsi->seid,
7334                         filter->queue, add, &stats, NULL);
7335
        PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
                         " mac_etype_used = %u, etype_used = %u,"
                         " mac_etype_free = %u, etype_free = %u",
                         ret, stats.mac_etype_used, stats.etype_used,
                         stats.mac_etype_free, stats.etype_free);
7341         if (ret < 0)
7342                 return -ENOSYS;
7343         return 0;
7344 }
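
/*
 * Application-side sketch, an assumption for illustration rather than
 * driver code: steer ARP frames (ether_type only, no MAC match) to RX
 * queue 0 via the generic filter API, reaching the function above.
 */
static inline int
i40e_ethertype_filter_example(uint8_t port_id)
{
        struct rte_eth_ethertype_filter filter = {
                .ether_type = ETHER_TYPE_ARP,
                .flags = 0, /* no RTE_ETHTYPE_FLAGS_MAC: ignore MAC */
                .queue = 0,
        };

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
                                       RTE_ETH_FILTER_ADD, &filter);
}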
7345
7346 /*
7347  * Handle operations for ethertype filter.
7348  */
7349 static int
7350 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
7351                                 enum rte_filter_op filter_op,
7352                                 void *arg)
7353 {
7354         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7355         int ret = 0;
7356
7357         if (filter_op == RTE_ETH_FILTER_NOP)
7358                 return ret;
7359
7360         if (arg == NULL) {
7361                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
7362                             filter_op);
7363                 return -EINVAL;
7364         }
7365
7366         switch (filter_op) {
7367         case RTE_ETH_FILTER_ADD:
7368                 ret = i40e_ethertype_filter_set(pf,
7369                         (struct rte_eth_ethertype_filter *)arg,
7370                         TRUE);
7371                 break;
7372         case RTE_ETH_FILTER_DELETE:
7373                 ret = i40e_ethertype_filter_set(pf,
7374                         (struct rte_eth_ethertype_filter *)arg,
7375                         FALSE);
7376                 break;
7377         default:
                PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
7379                 ret = -ENOSYS;
7380                 break;
7381         }
7382         return ret;
7383 }
7384
7385 static int
7386 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
7387                      enum rte_filter_type filter_type,
7388                      enum rte_filter_op filter_op,
7389                      void *arg)
7390 {
7391         int ret = 0;
7392
7393         if (dev == NULL)
7394                 return -EINVAL;
7395
7396         switch (filter_type) {
7397         case RTE_ETH_FILTER_NONE:
7398                 /* For global configuration */
7399                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
7400                 break;
7401         case RTE_ETH_FILTER_HASH:
7402                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
7403                 break;
7404         case RTE_ETH_FILTER_MACVLAN:
7405                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
7406                 break;
7407         case RTE_ETH_FILTER_ETHERTYPE:
7408                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
7409                 break;
7410         case RTE_ETH_FILTER_TUNNEL:
7411                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
7412                 break;
7413         case RTE_ETH_FILTER_FDIR:
7414                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
7415                 break;
7416         default:
7417                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
7418                                                         filter_type);
7419                 ret = -EINVAL;
7420                 break;
7421         }
7422
7423         return ret;
7424 }
7425
7426 /*
7427  * Check and enable Extended Tag.
7428  * Enabling Extended Tag is important for 40G performance.
7429  */
7430 static void
7431 i40e_enable_extended_tag(struct rte_eth_dev *dev)
7432 {
7433         uint32_t buf = 0;
7434         int ret;
7435
7436         ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
7437                                       PCI_DEV_CAP_REG);
7438         if (ret < 0) {
7439                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
7440                             PCI_DEV_CAP_REG);
7441                 return;
7442         }
7443         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
7444                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
7445                 return;
7446         }
7447
7448         buf = 0;
7449         ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
7450                                       PCI_DEV_CTRL_REG);
7451         if (ret < 0) {
7452                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
7453                             PCI_DEV_CTRL_REG);
7454                 return;
7455         }
7456         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
7457                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
7458                 return;
7459         }
7460         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
7461         ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf),
7462                                        PCI_DEV_CTRL_REG);
7463         if (ret < 0) {
7464                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
7465                             PCI_DEV_CTRL_REG);
7466                 return;
7467         }
7468 }
7469
/*
 * Some registers are only cleared by a global hardware reset, so explicit
 * initialization is needed to put them into the expected initial state.
 */
7475 static void
7476 i40e_hw_init(struct rte_eth_dev *dev)
7477 {
7478         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7479
7480         i40e_enable_extended_tag(dev);
7481
7482         /* clear the PF Queue Filter control register */
7483         I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0);
7484
7485         /* Disable symmetric hash per port */
7486         i40e_set_symmetric_hash_enable_per_port(hw, 0);
7487 }
7488
7489 enum i40e_filter_pctype
7490 i40e_flowtype_to_pctype(uint16_t flow_type)
7491 {
7492         static const enum i40e_filter_pctype pctype_table[] = {
7493                 [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
7494                 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
7495                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
7496                 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
7497                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
7498                 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
7499                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
7500                 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
7501                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
7502                 [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
7503                 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
7504                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
7505                 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
7506                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
7507                 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
7508                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
7509                 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
7510                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
7511                 [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
7512         };
7513
        /* Out-of-range flow types map to 0, which callers treat as invalid */
        if (flow_type >= RTE_DIM(pctype_table))
                return (enum i40e_filter_pctype)0;

        return pctype_table[flow_type];
7515 }
7516
7517 uint16_t
7518 i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
7519 {
7520         static const uint16_t flowtype_table[] = {
7521                 [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
7522                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
7523                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
7524                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
7525                         RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
7526                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
7527                         RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
7528                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
7529                         RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
7530                 [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
7531                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
7532                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
7533                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
7534                         RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
7535                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
7536                         RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
7537                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
7538                         RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
7539                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
7540         };
7541
        if (pctype >= RTE_DIM(flowtype_table))
                return RTE_ETH_FLOW_UNKNOWN;

        return flowtype_table[pctype];
7543 }
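
/*
 * Illustrative sketch only, not called by the driver: for every
 * supported flow type the two lookup tables above are inverses.
 */
static inline void
i40e_flowtype_pctype_roundtrip_example(void)
{
        uint16_t ft = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;

        if (i40e_pctype_to_flowtype(i40e_flowtype_to_pctype(ft)) != ft)
                PMD_DRV_LOG(ERR, "flow type table mismatch for %u", ft);
}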
7544
/*
 * On X710, performance falls well short of expectations on recent firmware
 * versions. The same is true on XL710 if promiscuous mode is disabled, or if
 * promiscuous mode is enabled and the port MAC address equals the packet's
 * destination MAC address. A firmware fix may not arrive soon, so a software
 * workaround is needed: the driver overrides the initial values of three
 * internal-only registers on both X710 and XL710. Note that the values for
 * X710 and XL710 could be different, and the workaround can be removed once
 * the issue is fixed in firmware.
 */
7556
7557 /* For both X710 and XL710 */
7558 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
7559 #define I40E_GL_SWR_PRI_JOIN_MAP_0       0x26CE00
7560
7561 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
7562 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
7563
7564 /* For X710 */
7565 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
7566 /* For XL710 */
7567 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
7568 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
7569
7570 static void
7571 i40e_configure_registers(struct i40e_hw *hw)
7572 {
7573         static struct {
7574                 uint32_t addr;
7575                 uint64_t val;
7576         } reg_table[] = {
7577                 {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
7578                 {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
7579                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
7580         };
7581         uint64_t reg;
7582         uint32_t i;
7583         int ret;
7584
7585         for (i = 0; i < RTE_DIM(reg_table); i++) {
7586                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
7587                         if (i40e_is_40G_device(hw->device_id)) /* For XL710 */
7588                                 reg_table[i].val =
7589                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
7590                         else /* For X710 */
7591                                 reg_table[i].val =
7592                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
7593                 }
7594
7595                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
7596                                                         &reg, NULL);
7597                 if (ret < 0) {
7598                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
7599                                                         reg_table[i].addr);
7600                         break;
7601                 }
7602                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
7603                                                 reg_table[i].addr, reg);
7604                 if (reg == reg_table[i].val)
7605                         continue;
7606
7607                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
7608                                                 reg_table[i].val, NULL);
7609                 if (ret < 0) {
7610                         PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
7611                                 "address of 0x%"PRIx32, reg_table[i].val,
7612                                                         reg_table[i].addr);
7613                         break;
7614                 }
7615                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
7616                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
7617         }
7618 }
7619
7620 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
7621 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
7622 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
7623 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
7624 static int
7625 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
7626 {
7627         uint32_t reg;
7628         int ret;
7629
7630         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
7631                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
7632                 return -EINVAL;
7633         }
7634
7635         /* Configure for double VLAN RX stripping */
7636         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
7637         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
7638                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
7639                 ret = i40e_aq_debug_write_register(hw,
7640                                                    I40E_VSI_TSR(vsi->vsi_id),
7641                                                    reg, NULL);
7642                 if (ret < 0) {
7643                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
7644                                     vsi->vsi_id);
7645                         return I40E_ERR_CONFIG;
7646                 }
7647         }
7648
7649         /* Configure for double VLAN TX insertion */
7650         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
7651         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
7652                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
7653                 ret = i40e_aq_debug_write_register(hw,
7654                                                    I40E_VSI_L2TAGSTXVALID(
7655                                                    vsi->vsi_id), reg, NULL);
7656                 if (ret < 0) {
7657                         PMD_DRV_LOG(ERR, "Failed to update "
7658                                 "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
7659                         return I40E_ERR_CONFIG;
7660                 }
7661         }
7662
7663         return 0;
7664 }
7665
/**
 * i40e_aq_add_mirror_rule
 * @hw: pointer to the hardware structure
 * @seid: VEB seid to add the mirror rule to
 * @dst_id: destination VSI seid
 * @rule_type: type of the mirror rule to be added
 * @entries: buffer which contains the entities to be mirrored
 * @count: number of entities contained in the buffer
 * @rule_id: the rule_id of the rule to be added
 *
 * Add a mirror rule for a given VEB.
 *
 **/
7678 static enum i40e_status_code
7679 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
7680                         uint16_t seid, uint16_t dst_id,
7681                         uint16_t rule_type, uint16_t *entries,
7682                         uint16_t count, uint16_t *rule_id)
7683 {
7684         struct i40e_aq_desc desc;
7685         struct i40e_aqc_add_delete_mirror_rule cmd;
7686         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
7687                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
7688                 &desc.params.raw;
7689         uint16_t buff_len;
7690         enum i40e_status_code status;
7691
7692         i40e_fill_default_direct_cmd_desc(&desc,
7693                                           i40e_aqc_opc_add_mirror_rule);
7694         memset(&cmd, 0, sizeof(cmd));
7695
7696         buff_len = sizeof(uint16_t) * count;
7697         desc.datalen = rte_cpu_to_le_16(buff_len);
7698         if (buff_len > 0)
7699                 desc.flags |= rte_cpu_to_le_16(
7700                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
7701         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
7702                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
7703         cmd.num_entries = rte_cpu_to_le_16(count);
7704         cmd.seid = rte_cpu_to_le_16(seid);
7705         cmd.destination = rte_cpu_to_le_16(dst_id);
7706
7707         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
7708         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
        PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
                         " rule_id = %u, mirror_rules_used = %u,"
                         " mirror_rules_free = %u",
                         hw->aq.asq_last_status, resp->rule_id,
                         resp->mirror_rules_used, resp->mirror_rules_free);
7714         *rule_id = rte_le_to_cpu_16(resp->rule_id);
7715
7716         return status;
7717 }
7718
/**
 * i40e_aq_del_mirror_rule
 * @hw: pointer to the hardware structure
 * @seid: VEB seid to delete the mirror rule from
 * @rule_type: type of the mirror rule to be deleted
 * @entries: buffer which contains the entities to be mirrored
 * @count: number of entities contained in the buffer
 * @rule_id: the rule_id of the rule to be deleted
 *
 * Delete a mirror rule for a given VEB.
 *
 **/
7730 static enum i40e_status_code
7731 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
7732                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
7733                 uint16_t count, uint16_t rule_id)
7734 {
7735         struct i40e_aq_desc desc;
7736         struct i40e_aqc_add_delete_mirror_rule cmd;
7737         uint16_t buff_len = 0;
7738         enum i40e_status_code status;
7739         void *buff = NULL;
7740
7741         i40e_fill_default_direct_cmd_desc(&desc,
7742                                           i40e_aqc_opc_delete_mirror_rule);
7743         memset(&cmd, 0, sizeof(cmd));
7744         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
7745                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
7746                                                           I40E_AQ_FLAG_RD));
                cmd.num_entries = rte_cpu_to_le_16(count);
7748                 buff_len = sizeof(uint16_t) * count;
7749                 desc.datalen = rte_cpu_to_le_16(buff_len);
7750                 buff = (void *)entries;
        } else {
                /* The rule id is carried in the destination field when
                 * deleting a mirror rule.
                 */
                cmd.destination = rte_cpu_to_le_16(rule_id);
        }
7754
7755         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
7756                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
7757         cmd.seid = rte_cpu_to_le_16(seid);
7758
7759         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
7760         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
7761
7762         return status;
7763 }
7764
/**
 * i40e_mirror_rule_set
 * @dev: pointer to the device structure
 * @mirror_conf: mirror rule info
 * @sw_id: mirror rule's sw_id
 * @on: enable/disable
 *
 * Set a mirror rule.
 *
 **/
7775 static int
7776 i40e_mirror_rule_set(struct rte_eth_dev *dev,
7777                         struct rte_eth_mirror_conf *mirror_conf,
7778                         uint8_t sw_id, uint8_t on)
7779 {
7780         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7781         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7782         struct i40e_mirror_rule *it, *mirr_rule = NULL;
7783         struct i40e_mirror_rule *parent = NULL;
7784         uint16_t seid, dst_seid, rule_id;
7785         uint16_t i, j = 0;
7786         int ret;
7787
7788         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
7789
7790         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
                PMD_DRV_LOG(ERR, "mirror rule cannot be configured"
                        " without a veb or vfs.");
7793                 return -ENOSYS;
7794         }
        if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
7796                 PMD_DRV_LOG(ERR, "mirror table is full.");
7797                 return -ENOSPC;
7798         }
7799         if (mirror_conf->dst_pool > pf->vf_num) {
7800                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
7801                                  mirror_conf->dst_pool);
7802                 return -EINVAL;
7803         }
7804
7805         seid = pf->main_vsi->veb->seid;
7806
7807         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
7808                 if (sw_id <= it->index) {
7809                         mirr_rule = it;
7810                         break;
7811                 }
7812                 parent = it;
7813         }
7814         if (mirr_rule && sw_id == mirr_rule->index) {
7815                 if (on) {
7816                         PMD_DRV_LOG(ERR, "mirror rule exists.");
7817                         return -EEXIST;
7818                 } else {
7819                         ret = i40e_aq_del_mirror_rule(hw, seid,
7820                                         mirr_rule->rule_type,
7821                                         mirr_rule->entries,
7822                                         mirr_rule->num_entries, mirr_rule->id);
7823                         if (ret < 0) {
7824                                 PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
7825                                                    " ret = %d, aq_err = %d.",
7826                                                    ret, hw->aq.asq_last_status);
7827                                 return -ENOSYS;
7828                         }
7829                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
7830                         rte_free(mirr_rule);
7831                         pf->nb_mirror_rule--;
7832                         return 0;
7833                 }
7834         } else if (!on) {
7835                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
7836                 return -ENOENT;
7837         }
7838
        mirr_rule = rte_zmalloc("i40e_mirror_rule",
                                sizeof(struct i40e_mirror_rule), 0);
7841         if (!mirr_rule) {
7842                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7843                 return I40E_ERR_NO_MEMORY;
7844         }
7845         switch (mirror_conf->rule_type) {
7846         case ETH_MIRROR_VLAN:
7847                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
7848                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
7849                                 mirr_rule->entries[j] =
7850                                         mirror_conf->vlan.vlan_id[i];
7851                                 j++;
7852                         }
7853                 }
7854                 if (j == 0) {
7855                         PMD_DRV_LOG(ERR, "vlan is not specified.");
7856                         rte_free(mirr_rule);
7857                         return -EINVAL;
7858                 }
7859                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
7860                 break;
7861         case ETH_MIRROR_VIRTUAL_POOL_UP:
7862         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
                /* valid pool bits are 0..vf_num, i.e. the mask must stay
                 * below 1ULL << (vf_num + 1)
                 */
                if (mirror_conf->pool_mask >=
                    (uint64_t)(1ULL << (pf->vf_num + 1))) {
                        PMD_DRV_LOG(ERR, "pool mask is out of range.");
                        rte_free(mirr_rule);
                        return -EINVAL;
                }
                for (i = 0, j = 0; i < pf->vf_num; i++) {
                        if (mirror_conf->pool_mask & (1ULL << i)) {
                                mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
                                j++;
                        }
                }
                if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
                        /* add pf vsi to entries */
                        mirr_rule->entries[j] = pf->main_vsi_seid;
                        j++;
                }
                if (j == 0) {
                        PMD_DRV_LOG(ERR, "pool is not specified.");
                        rte_free(mirr_rule);
                        return -EINVAL;
                }
                /* "egress" and "ingress" in the AQ commands are relative to the switch, not the port */
                mirr_rule->rule_type =
                        (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
                        I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
                        I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
                break;
        case ETH_MIRROR_UPLINK_PORT:
                /* "egress" and "ingress" in the AQ commands are relative to the switch, not the port */
                mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
                break;
        case ETH_MIRROR_DOWNLINK_PORT:
                mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
                break;
        default:
                PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
                        mirror_conf->rule_type);
                rte_free(mirr_rule);
                return -EINVAL;
        }

        /* If the dst_pool is equal to vf_num, consider it as PF */
        if (mirror_conf->dst_pool == pf->vf_num)
                dst_seid = pf->main_vsi_seid;
        else
                dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;

        ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
                                      mirr_rule->rule_type, mirr_rule->entries,
                                      j, &rule_id);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "failed to add mirror rule:"
                                   " ret = %d, aq_err = %d.",
                                   ret, hw->aq.asq_last_status);
                rte_free(mirr_rule);
                return -ENOSYS;
        }

        mirr_rule->index = sw_id;
        mirr_rule->num_entries = j;
        mirr_rule->id = rule_id;
        mirr_rule->dst_vsi_seid = dst_seid;

        if (parent)
                TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
        else
                TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);

        pf->nb_mirror_rule++;
        return 0;
}

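/* Usage sketch (illustrative; port_id and the rule id below are assumptions,
 * not taken from this file): applications reach the function above through
 * the generic ethdev API, e.g. to mirror VLANs 100 and 200 to pool 0:
 *
 *     struct rte_eth_mirror_conf conf = {
 *             .rule_type = ETH_MIRROR_VLAN,
 *             .dst_pool = 0,
 *             .vlan = { .vlan_mask = 0x3ULL, .vlan_id = { 100, 200 } },
 *     };
 *     ret = rte_eth_mirror_rule_set(port_id, &conf, 1, 1);
 *
 * The mirror list is kept sorted by sw_id, which is why the code above walks
 * the list to find a parent entry before inserting.
 */
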
/**
 * i40e_mirror_rule_reset
 * @dev: pointer to the device
 * @sw_id: mirror rule's sw_id
 *
 * Delete the mirror rule identified by sw_id.
 *
 * Returns 0 on success, negative value on failure
 **/
static int
i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_mirror_rule *it, *mirr_rule = NULL;
        uint16_t seid;
        int ret;

        PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);

        seid = pf->main_vsi->veb->seid;

        TAILQ_FOREACH(it, &pf->mirror_list, rules) {
                if (sw_id == it->index) {
                        mirr_rule = it;
                        break;
                }
        }
        if (mirr_rule) {
                ret = i40e_aq_del_mirror_rule(hw, seid,
                                mirr_rule->rule_type,
                                mirr_rule->entries,
                                mirr_rule->num_entries, mirr_rule->id);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
                                           " status = %d, aq_err = %d.",
                                           ret, hw->aq.asq_last_status);
                        return -ENOSYS;
                }
                TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
                rte_free(mirr_rule);
                pf->nb_mirror_rule--;
        } else {
                PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
                return -ENOENT;
        }
        return 0;
}

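/* Teardown sketch (port_id and the rule id are illustrative): a rule added
 * with rte_eth_mirror_rule_set() is removed by passing the same sw_id, e.g.
 *
 *     ret = rte_eth_mirror_rule_reset(port_id, 1);
 */
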
static uint64_t
i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint64_t systim_cycles;

        systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
        systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
                        << 32;

        return systim_cycles;
}

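/* The 64-bit timesync counters are exposed as 32-bit register pairs; here
 * and in the two readers below, the low half is read first and the high
 * half is combined in software. The low-half read is assumed to latch a
 * consistent snapshot of the high half on this hardware family (an
 * assumption documented here, not enforced by the code).
 */
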
static uint64_t
i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint64_t rx_tstamp;

        rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
        rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
                        << 32;

        return rx_tstamp;
}

static uint64_t
i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint64_t tx_tstamp;

        tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
        tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
                        << 32;

        return tx_tstamp;
}

static void
i40e_start_timecounters(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_adapter *adapter =
                        (struct i40e_adapter *)dev->data->dev_private;
        struct rte_eth_link link;
        uint32_t tsync_inc_l;
        uint32_t tsync_inc_h;

        /* Get current link speed. */
        memset(&link, 0, sizeof(link));
        i40e_dev_link_update(dev, 1);
        rte_i40e_dev_atomic_read_link_status(dev, &link);

        switch (link.link_speed) {
        case ETH_LINK_SPEED_40G:
                tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
                tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
                break;
        case ETH_LINK_SPEED_10G:
                tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
                tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
                break;
        case ETH_LINK_SPEED_1000:
                tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
                tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
                break;
        default:
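                /* Unknown or unsupported link speed: leave the increment
                 * at zero so the timesync clock does not advance.
                 */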
                tsync_inc_l = 0x0;
                tsync_inc_h = 0x0;
        }

        /* Set the timesync increment value. */
        I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
        I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);

        memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
        memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
        memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

        adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
        adapter->systime_tc.cc_shift = 0;
        adapter->systime_tc.nsec_mask = 0;

        adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
        adapter->rx_tstamp_tc.cc_shift = 0;
        adapter->rx_tstamp_tc.nsec_mask = 0;

        adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
        adapter->tx_tstamp_tc.cc_shift = 0;
        adapter->tx_tstamp_tc.nsec_mask = 0;
}

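/* Sketch of the cyclecounter-to-nanosecond flow configured above, assuming
 * the per-speed INCVAL constants make the hardware counter tick once per
 * nanosecond (which is why cc_shift can stay 0):
 *
 *     uint64_t cycles = i40e_read_systime_cyclecounter(dev);
 *     uint64_t ns = rte_timecounter_update(&adapter->systime_tc, cycles);
 *
 * rte_timecounter_update() accumulates the masked delta since the previous
 * call, so wrap-around of the hardware counter is handled in software.
 */
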
static int
i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
        struct i40e_adapter *adapter =
                        (struct i40e_adapter *)dev->data->dev_private;

        adapter->systime_tc.nsec += delta;
        adapter->rx_tstamp_tc.nsec += delta;
        adapter->tx_tstamp_tc.nsec += delta;

        return 0;
}

static int
i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
        uint64_t ns;
        struct i40e_adapter *adapter =
                        (struct i40e_adapter *)dev->data->dev_private;

        ns = rte_timespec_to_ns(ts);

        /* Set the timecounters to a new value. */
        adapter->systime_tc.nsec = ns;
        adapter->rx_tstamp_tc.nsec = ns;
        adapter->tx_tstamp_tc.nsec = ns;

        return 0;
}

static int
i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
        uint64_t ns, systime_cycles;
        struct i40e_adapter *adapter =
                        (struct i40e_adapter *)dev->data->dev_private;

        systime_cycles = i40e_read_systime_cyclecounter(dev);
        ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
        *ts = rte_ns_to_timespec(ns);

        return 0;
}

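/* Usage sketch for the three ops above through the generic ethdev API
 * (port_id is an assumption):
 *
 *     struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
 *     rte_eth_timesync_write_time(port_id, &ts);   (set the clock)
 *     rte_eth_timesync_adjust_time(port_id, 500);  (step it by 500 ns)
 *     rte_eth_timesync_read_time(port_id, &ts);    (read it back)
 */
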
static int
i40e_timesync_enable(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t tsync_ctl_l;
        uint32_t tsync_ctl_h;

        /* Stop the timesync system time. */
        I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
        I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
        /* Reset the timesync system time value. */
        I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
        I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);

        i40e_start_timecounters(dev);

        /* Clear any latched timestamps (reading these registers clears them). */
        I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
        I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
        I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
        I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
        I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
        I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));

        /* Enable timestamping of PTP packets. */
        tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
        tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;

        tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
        tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
        tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;

        I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
        I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);

        return 0;
}

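/* Typical PTP bring-up (sketch; port_id is an assumption): enable
 * timesyncing, then fetch timestamps as PTP packets arrive:
 *
 *     struct timespec rx_ts;
 *     rte_eth_timesync_enable(port_id);
 *     ...
 *     if (rte_eth_timesync_read_rx_timestamp(port_id, &rx_ts, 0) == 0)
 *             (the packet timestamp is now in rx_ts)
 */
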
static int
i40e_timesync_disable(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t tsync_ctl_l;
        uint32_t tsync_ctl_h;

        /* Disable timestamping of received and transmitted PTP packets. */
        tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
        tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;

        tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
        tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;

        I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
        I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);

        /* Reset the timesync increment value. */
        I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
        I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);

        return 0;
}

static int
i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                struct timespec *timestamp, uint32_t flags)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_adapter *adapter =
                (struct i40e_adapter *)dev->data->dev_private;

        uint32_t sync_status;
        uint32_t index = flags & 0x03;
        uint64_t rx_tstamp_cycles;
        uint64_t ns;

        sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
        if ((sync_status & (1 << index)) == 0)
                return -EINVAL;

        rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
        ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
        *timestamp = rte_ns_to_timespec(ns);

        return 0;
}

static int
i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                struct timespec *timestamp)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_adapter *adapter =
                (struct i40e_adapter *)dev->data->dev_private;

        uint32_t sync_status;
        uint64_t tx_tstamp_cycles;
        uint64_t ns;

        sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
        if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
                return -EINVAL;

        tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
        ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
        *timestamp = rte_ns_to_timespec(ns);

        return 0;
}

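/* The TX timestamp is latched only after the packet has left the wire, so
 * callers usually poll (sketch; port_id and the retry budget are
 * assumptions):
 *
 *     struct timespec tx_ts;
 *     int retries = 100;
 *     while (rte_eth_timesync_read_tx_timestamp(port_id, &tx_ts) < 0 &&
 *            --retries > 0)
 *             rte_delay_us(10);
 */
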
/*
 * i40e_parse_dcb_configure - parse the user-supplied DCB configuration
 * @dev: the device being configured
 * @dcb_cfg: pointer to the parsed result
 * @tc_map: bit map of enabled traffic classes
 *
 * Returns 0 on success, negative value on failure
 */
static int
i40e_parse_dcb_configure(struct rte_eth_dev *dev,
                         struct i40e_dcbx_config *dcb_cfg,
                         uint8_t *tc_map)
{
        struct rte_eth_dcb_rx_conf *dcb_rx_conf;
        uint8_t i, tc_bw, bw_lf;

        memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));

        dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
        if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
                PMD_INIT_LOG(ERR, "number of tc exceeds max.");
                return -EINVAL;
        }

        /* assume each tc has the same bw; guard the division against
         * nb_tcs == 0, which is mapped to "tc0 only" further below
         */
        if (dcb_rx_conf->nb_tcs != 0) {
                tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
                for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
                        dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
                /* to ensure the sum of tcbw is equal to 100 */
                bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
                for (i = 0; i < bw_lf; i++)
                        dcb_cfg->etscfg.tcbwtable[i]++;
        }

        /* assume each tc has the same Transmission Selection Algorithm */
        for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
                dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;

        for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
                dcb_cfg->etscfg.prioritytable[i] =
                                dcb_rx_conf->dcb_tc[i];

        /* FW needs one App to configure HW */
        dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
        dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
        dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
        dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;

        if (dcb_rx_conf->nb_tcs == 0)
                *tc_map = 1; /* tc0 only */
        else
                *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);

        if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
                dcb_cfg->pfc.willing = 0;
                dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
                dcb_cfg->pfc.pfcenable = *tc_map;
        }
        return 0;
}

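/* Worked example of the bandwidth split above: with nb_tcs == 3,
 * tc_bw = 100 / 3 = 33 and bw_lf = 100 % 3 = 1, so the table becomes
 * { 34, 33, 33 } and the shares still sum to 100.
 */
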

static enum i40e_status_code
i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
                              struct i40e_aqc_vsi_properties_data *info,
                              uint8_t enabled_tcmap)
{
        enum i40e_status_code ret;
        int i, total_tc = 0;
        uint16_t qpnum_per_tc, bsf, qp_idx;
        struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);

        ret = validate_tcmap_parameter(vsi, enabled_tcmap);
        if (ret != I40E_SUCCESS)
                return ret;

        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (enabled_tcmap & (1 << i))
                        total_tc++;
        }
        if (total_tc == 0)
                total_tc = 1;
        vsi->enabled_tc = enabled_tcmap;

        /* Number of queues per enabled TC */
        qpnum_per_tc = dev_data->nb_rx_queues / total_tc;
        if (qpnum_per_tc == 0) {
                PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
                return I40E_ERR_INVALID_QP_ID;
        }
        qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
                                I40E_MAX_Q_PER_TC);
        bsf = rte_bsf32(qpnum_per_tc);

        /**
         * Configure TC and queue mapping parameters. For each enabled TC,
         * allocate qpnum_per_tc queues to this traffic class. Disabled TCs
         * are served by the default queue.
         */
        qp_idx = 0;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (vsi->enabled_tc & (1 << i)) {
                        info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
                                        I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
                                (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
                        qp_idx += qpnum_per_tc;
                } else
                        info->tc_mapping[i] = 0;
        }

        /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
        if (vsi->type == I40E_VSI_SRIOV) {
                info->mapping_flags |=
                        rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
                for (i = 0; i < vsi->nb_qps; i++)
                        info->queue_mapping[i] =
                                rte_cpu_to_le_16(vsi->base_queue + i);
        } else {
                info->mapping_flags |=
                        rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
                info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
        }
        info->valid_sections |=
                rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);

        return I40E_SUCCESS;
}

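/* Worked example of the tc_mapping encoding above (values illustrative):
 * with 16 RX queues and 4 enabled TCs, qpnum_per_tc = 4 and bsf = 2, so
 * TC1 gets tc_mapping = (4 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 * (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT), i.e. queues 4-7 (the queue
 * count is stored as a power-of-two exponent).
 */
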
/*
 * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
 * @vsi: VSI to be configured
 * @tc_map: enabled TC bitmap
 *
 * Returns 0 on success, negative value on failure
 */
static enum i40e_status_code
i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 tc_map)
{
        struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
        struct i40e_vsi_context ctxt;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        enum i40e_status_code ret = I40E_SUCCESS;
        int i;

        /* Nothing to do if the requested TC map is already enabled */
        if (vsi->enabled_tc == tc_map)
                return ret;

        /* configure tc bandwidth */
        memset(&bw_data, 0, sizeof(bw_data));
        bw_data.tc_valid_bits = tc_map;
        /* Enable ETS TCs with equal BW Share for now across all VSIs */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (tc_map & BIT_ULL(i))
                        bw_data.tc_bw_credits[i] = 1;
        }
        ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
        if (ret) {
                PMD_INIT_LOG(ERR, "AQ command to configure VSI BW allocation"
                        " per TC failed, aq_err = %d",
                        hw->aq.asq_last_status);
                goto out;
        }
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                vsi->info.qs_handle[i] = bw_data.qs_handles[i];

        /* Update Queue Pairs Mapping for currently enabled UPs */
        ctxt.seid = vsi->seid;
        ctxt.pf_num = hw->pf_id;
        ctxt.vf_num = 0;
        ctxt.uplink_seid = vsi->uplink_seid;
        ctxt.info = vsi->info;
        i40e_get_cap(hw);
        ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
        if (ret)
                goto out;

        /* Update the VSI after updating the VSI queue-mapping information */
        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to configure "
                            "TC queue mapping, aq_err = %d",
                            hw->aq.asq_last_status);
                goto out;
        }
        /* update the local VSI info with updated queue map */
        (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
                         sizeof(vsi->info.tc_mapping));
        (void)rte_memcpy(&vsi->info.queue_mapping,
                         &ctxt.info.queue_mapping,
                         sizeof(vsi->info.queue_mapping));
        vsi->info.mapping_flags = ctxt.info.mapping_flags;
        vsi->info.valid_sections = 0;

        /* query and update current VSI BW information */
        ret = i40e_vsi_get_bw_config(vsi);
        if (ret) {
                PMD_INIT_LOG(ERR,
                         "Failed updating vsi bw info, err %s aq_err %s",
                         i40e_stat_str(hw, ret),
                         i40e_aq_str(hw, hw->aq.asq_last_status));
                goto out;
        }

        vsi->enabled_tc = tc_map;

out:
        return ret;
}

/*
 * i40e_dcb_hw_configure - program the dcb setting to hw
 * @pf: pf the configuration is taken on
 * @new_cfg: new configuration
 * @tc_map: enabled TC bitmap
 *
 * Returns 0 on success, negative value on failure
 */
static enum i40e_status_code
i40e_dcb_hw_configure(struct i40e_pf *pf,
                      struct i40e_dcbx_config *new_cfg,
                      uint8_t tc_map)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
        struct i40e_vsi *main_vsi = pf->main_vsi;
        struct i40e_vsi_list *vsi_list;
        enum i40e_status_code ret;
        int i;
        uint32_t val;

        /* Use the FW API only if FW >= v4.4 */
        if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
              (hw->aq.fw_maj_ver >= 5))) {
                PMD_INIT_LOG(ERR, "FW < v4.4, cannot use FW LLDP API"
                                  " to configure DCB");
                return I40E_ERR_FIRMWARE_API_VERSION;
        }

        /* Check whether reconfiguration is needed */
        if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
                PMD_INIT_LOG(ERR, "no change in DCB config, nothing to do.");
                return I40E_SUCCESS;
        }

        /* Copy the new config to the current config */
        *old_cfg = *new_cfg;
        old_cfg->etsrec = old_cfg->etscfg;
        ret = i40e_set_dcb_config(hw);
        if (ret) {
                PMD_INIT_LOG(ERR,
                         "Set DCB config failed, err %s aq_err %s",
                         i40e_stat_str(hw, ret),
                         i40e_aq_str(hw, hw->aq.asq_last_status));
                return ret;
        }
        /* set receive Arbiter to RR mode and ETS scheme by default */
        for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
                val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
                /* clear the BW share, UP-in-TC mode and ETSTC fields */
                val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
                         I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
                         I40E_PRTDCB_RETSTCC_ETSTC_MASK);
                val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
                        I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
                         I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
                val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
                         I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
                val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
                         I40E_PRTDCB_RETSTCC_ETSTC_MASK;
                I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
        }
        /* get the local MIB (IEEE mode) to check whether the
         * configuration was applied correctly
         */
        hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
        /* Get Local DCB Config */
        i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
                                     &hw->local_dcbx_config);

        /* Update each VSI */
        i40e_vsi_config_tc(main_vsi, tc_map);
        if (main_vsi->veb) {
                TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
                        /* Besides the main VSI, only enable the default
                         * TC for the other VSIs
                         */
                        ret = i40e_vsi_config_tc(vsi_list->vsi,
                                                I40E_DEFAULT_TCMAP);
                        if (ret)
                                PMD_INIT_LOG(WARNING,
                                         "Failed configuring TC for VSI seid=%d",
                                         vsi_list->vsi->seid);
                        /* continue */
                }
        }
        return I40E_SUCCESS;
}

/*
 * i40e_dcb_init_configure - initial DCB configuration
 * @dev: device being configured
 * @sw_dcb: indicates whether DCB is configured in SW or offloaded to HW
 *
 * Returns 0 on success, negative value on failure
 */
static int
i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret = 0;

        if ((pf->flags & I40E_FLAG_DCB) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support DCB");
                return -ENOTSUP;
        }

        /* DCB initialization:
         * Update DCB configuration from the Firmware and configure
         * LLDP MIB change event.
         */
        if (sw_dcb == TRUE) {
                ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
                if (ret != I40E_SUCCESS)
                        PMD_INIT_LOG(DEBUG, "Failed to stop lldp");

                ret = i40e_init_dcb(hw);
                /* With sw_dcb the LLDP agent is stopped, so i40e_init_dcb
                 * is expected to fail with adminq status I40E_AQ_RC_EPERM.
                 */
                if (ret != I40E_SUCCESS &&
                    hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
                        memset(&hw->local_dcbx_config, 0,
                                sizeof(struct i40e_dcbx_config));
                        /* set dcb default configuration */
                        hw->local_dcbx_config.etscfg.willing = 0;
                        hw->local_dcbx_config.etscfg.maxtcs = 0;
                        hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
                        hw->local_dcbx_config.etscfg.tsatable[0] =
                                                I40E_IEEE_TSA_ETS;
                        hw->local_dcbx_config.etsrec =
                                hw->local_dcbx_config.etscfg;
                        hw->local_dcbx_config.pfc.willing = 0;
                        hw->local_dcbx_config.pfc.pfccap =
                                                I40E_MAX_TRAFFIC_CLASS;
                        /* FW needs one App to configure HW */
                        hw->local_dcbx_config.numapps = 1;
                        hw->local_dcbx_config.app[0].selector =
                                                I40E_APP_SEL_ETHTYPE;
                        hw->local_dcbx_config.app[0].priority = 3;
                        hw->local_dcbx_config.app[0].protocolid =
                                                I40E_APP_PROTOID_FCOE;
                        ret = i40e_set_dcb_config(hw);
                        if (ret) {
                                PMD_INIT_LOG(ERR, "default DCB config failed,"
                                          " err = %d, aq_err = %d.", ret,
                                          hw->aq.asq_last_status);
                                return -ENOSYS;
                        }
                } else {
                        PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
                                          " aq_err = %d.", ret,
                                          hw->aq.asq_last_status);
                        return -ENOTSUP;
                }
        } else {
                ret = i40e_aq_start_lldp(hw, NULL);
                if (ret != I40E_SUCCESS)
                        PMD_INIT_LOG(DEBUG, "Failed to start lldp");

                ret = i40e_init_dcb(hw);
                if (!ret) {
                        if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
                                PMD_INIT_LOG(ERR, "HW doesn't support"
                                                  " DCBX offload.");
                                return -ENOTSUP;
                        }
                } else {
                        PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
                                          " aq_err = %d.", ret,
                                          hw->aq.asq_last_status);
                        return -ENOTSUP;
                }
        }
        return 0;
}

/*
 * i40e_dcb_setup - setup dcb related config
 * @dev: device being configured
 *
 * Returns 0 on success, negative value on failure
 */
static int
i40e_dcb_setup(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_dcbx_config dcb_cfg;
        uint8_t tc_map = 0;
        int ret = 0;

        if ((pf->flags & I40E_FLAG_DCB) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support DCB");
                return -ENOTSUP;
        }

        if (pf->vf_num != 0 ||
            (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                PMD_INIT_LOG(DEBUG, "DCB only works on the main VSI.");

        ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
        if (ret) {
                PMD_INIT_LOG(ERR, "invalid dcb config");
                return -EINVAL;
        }
        ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
        if (ret) {
                PMD_INIT_LOG(ERR, "failed to configure DCB");
                return -ENOSYS;
        }

        return 0;
}

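/* Configuration sketch that leads into this path (the field values are
 * illustrative): DCB is requested at configure time through the RX
 * multi-queue mode, e.g.
 *
 *     struct rte_eth_conf conf = { .rxmode = { .mq_mode = ETH_MQ_RX_DCB } };
 *     conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
 *     ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */
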
static int
i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                      struct rte_eth_dcb_info *dcb_info)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
        uint16_t bsf, tc_mapping;
        int i;

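        /* vsi->enabled_tc is assumed to be a contiguous bitmap starting at
         * TC0, so the TC count is the index of the first zero bit,
         * i.e. rte_bsf32(map + 1).
         */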
        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
                dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
        else
                dcb_info->nb_tcs = 1;
        for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
                dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
        for (i = 0; i < dcb_info->nb_tcs; i++)
                dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];

        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (vsi->enabled_tc & (1 << i)) {
                        tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
                        /* only the main VSI supports multiple TCs */
                        dcb_info->tc_queue.tc_rxq[0][i].base =
                                (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
                                I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
                        dcb_info->tc_queue.tc_txq[0][i].base =
                                dcb_info->tc_queue.tc_rxq[0][i].base;
                        bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
                                I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
                        dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 1 << bsf;
                        dcb_info->tc_queue.tc_txq[0][i].nb_queue =
                                dcb_info->tc_queue.tc_rxq[0][i].nb_queue;
                }
        }

        return 0;
}

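/* Retrieval sketch (port_id is an assumption):
 *
 *     struct rte_eth_dcb_info info;
 *     if (rte_eth_dev_get_dcb_info(port_id, &info) == 0)
 *             (info.nb_tcs, info.tc_bws[] etc. are now valid)
 */
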
static int
i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t interval =
                i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
        uint16_t msix_intr;

        msix_intr = intr_handle->intr_vec[queue_id];
        if (msix_intr == I40E_MISC_VEC_ID)
                I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
                               I40E_PFINT_DYN_CTLN_INTENA_MASK |
                               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
                               (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
                               (interval <<
                                I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
        else
                I40E_WRITE_REG(hw,
                               I40E_PFINT_DYN_CTLN(msix_intr -
                                                   I40E_RX_VEC_START),
                               I40E_PFINT_DYN_CTLN_INTENA_MASK |
                               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
                               (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
                               (interval <<
                                I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));

        I40E_WRITE_FLUSH(hw);
        rte_intr_enable(&dev->pci_dev->intr_handle);

        return 0;
}

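/* Usage sketch (port_id and queue_id are assumptions): an application arms
 * the queue interrupt before sleeping on the interrupt event and re-arms
 * it after each wakeup, e.g.
 *
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *     (wait for the interrupt event, then:)
 *     rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *     (drain the queue with rte_eth_rx_burst() and loop)
 */
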
static int
i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t msix_intr;

        msix_intr = intr_handle->intr_vec[queue_id];
        if (msix_intr == I40E_MISC_VEC_ID)
                I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
        else
                I40E_WRITE_REG(hw,
                               I40E_PFINT_DYN_CTLN(msix_intr -
                                                   I40E_RX_VEC_START),
                               0);
        I40E_WRITE_FLUSH(hw);

        return 0;
}