drivers/net/i40e/i40e_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"

#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0x1C40/1024)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0x1A40/1024)

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10
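
/*
 * Editor's note, a worked example of the two water marks above, assuming
 * (as the divide by 1024 suggests) that they are programmed in kilobyte
 * units: 0x1C40 = 7232 bytes, so the default high water mark is
 * 7232 / 1024 = 7 KB; likewise 0x1A40 = 6720 bytes gives a 6 KB default
 * low water mark.
 */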

/* Receive average packet size in bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))
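
/*
 * Editor's note: I40E_FLOW_TYPES serves as a support mask; a flow type
 * "ftype" is considered supported by this driver when
 * (I40E_FLOW_TYPES & (1UL << ftype)) is non-zero.
 */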

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL

#define I40E_MAX_PERCENT            100
#define I40E_DEFAULT_DCB_APP_NUM    1
#define I40E_DEFAULT_DCB_APP_PRIO   3
/**
 * Below are values, suggested by silicon experts, for writing
 * un-exposed registers.
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPV4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
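
/*
 * Editor's note: a minimal sketch of how the four Extended Tag constants
 * above are typically used together, mirroring the driver's
 * i40e_enable_extended_tag(). The helper name is hypothetical and the
 * block is compiled out; it is an illustration, not part of the driver.
 */
#if 0
static void
example_enable_extended_tag(struct rte_pci_device *pci_dev)
{
        uint32_t buf = 0;

        /* Bail out unless the device advertises Extended Tag support. */
        if (rte_pci_read_config(pci_dev, &buf, sizeof(buf),
                                PCI_DEV_CAP_REG) < 0 ||
            !(buf & PCI_DEV_CAP_EXT_TAG_MASK))
                return;

        /* Set the enable bit in the device control register. */
        buf = 0;
        if (rte_pci_read_config(pci_dev, &buf, sizeof(buf),
                                PCI_DEV_CTRL_REG) < 0)
                return;
        buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
        rte_pci_write_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG);
}
#endif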

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
                               struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
                                     struct rte_eth_xstat_name *xstats_names,
                                     unsigned limit);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
                                            uint16_t queue_id,
                                            uint8_t stat_idx,
                                            uint8_t is_rx);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
                                char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id,
                                int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
                              enum rte_vlan_type vlan_type,
                              uint16_t tpid);
static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
                                      uint16_t queue,
                                      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                                       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
                            struct ether_addr *mac_addr,
                            uint32_t index,
                            uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
                bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
                               uint32_t hireg,
                               uint32_t loreg,
                               bool offset_loaded,
                               uint64_t *offset,
                               uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
                                uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
                        uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
                        uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
                                                struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
                                enum rte_filter_type filter_type,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                                  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp,
                                           uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
                                    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                         uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
                         struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
                           struct rte_dev_eeprom_info *eeprom);

static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
        const struct rte_eth_ethertype_filter *input,
        struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
                                   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
        struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
        struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
                                struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

int i40e_logtype_init;
int i40e_logtype_driver;

static const struct rte_pci_id pci_id_i40e_map[] = {
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
        .dev_configure                = i40e_dev_configure,
        .dev_start                    = i40e_dev_start,
        .dev_stop                     = i40e_dev_stop,
        .dev_close                    = i40e_dev_close,
        .promiscuous_enable           = i40e_dev_promiscuous_enable,
        .promiscuous_disable          = i40e_dev_promiscuous_disable,
        .allmulticast_enable          = i40e_dev_allmulticast_enable,
        .allmulticast_disable         = i40e_dev_allmulticast_disable,
        .dev_set_link_up              = i40e_dev_set_link_up,
        .dev_set_link_down            = i40e_dev_set_link_down,
        .link_update                  = i40e_dev_link_update,
        .stats_get                    = i40e_dev_stats_get,
        .xstats_get                   = i40e_dev_xstats_get,
        .xstats_get_names             = i40e_dev_xstats_get_names,
        .stats_reset                  = i40e_dev_stats_reset,
        .xstats_reset                 = i40e_dev_stats_reset,
        .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
        .fw_version_get               = i40e_fw_version_get,
        .dev_infos_get                = i40e_dev_info_get,
        .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
        .vlan_tpid_set                = i40e_vlan_tpid_set,
        .vlan_offload_set             = i40e_vlan_offload_set,
        .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
        .vlan_pvid_set                = i40e_vlan_pvid_set,
        .rx_queue_start               = i40e_dev_rx_queue_start,
        .rx_queue_stop                = i40e_dev_rx_queue_stop,
        .tx_queue_start               = i40e_dev_tx_queue_start,
        .tx_queue_stop                = i40e_dev_tx_queue_stop,
        .rx_queue_setup               = i40e_dev_rx_queue_setup,
        .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
        .rx_queue_release             = i40e_dev_rx_queue_release,
        .rx_queue_count               = i40e_dev_rx_queue_count,
        .rx_descriptor_done           = i40e_dev_rx_descriptor_done,
        .rx_descriptor_status         = i40e_dev_rx_descriptor_status,
        .tx_descriptor_status         = i40e_dev_tx_descriptor_status,
        .tx_queue_setup               = i40e_dev_tx_queue_setup,
        .tx_queue_release             = i40e_dev_tx_queue_release,
        .dev_led_on                   = i40e_dev_led_on,
        .dev_led_off                  = i40e_dev_led_off,
        .flow_ctrl_get                = i40e_flow_ctrl_get,
        .flow_ctrl_set                = i40e_flow_ctrl_set,
        .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
        .mac_addr_add                 = i40e_macaddr_add,
        .mac_addr_remove              = i40e_macaddr_remove,
        .reta_update                  = i40e_dev_rss_reta_update,
        .reta_query                   = i40e_dev_rss_reta_query,
        .rss_hash_update              = i40e_dev_rss_hash_update,
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
        .filter_ctrl                  = i40e_dev_filter_ctrl,
        .rxq_info_get                 = i40e_rxq_info_get,
        .txq_info_get                 = i40e_txq_info_get,
        .mirror_rule_set              = i40e_mirror_rule_set,
        .mirror_rule_reset            = i40e_mirror_rule_reset,
        .timesync_enable              = i40e_timesync_enable,
        .timesync_disable             = i40e_timesync_disable,
        .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
        .get_dcb_info                 = i40e_dev_get_dcb_info,
        .timesync_adjust_time         = i40e_timesync_adjust_time,
        .timesync_read_time           = i40e_timesync_read_time,
        .timesync_write_time          = i40e_timesync_write_time,
        .get_reg                      = i40e_get_regs,
        .get_eeprom_length            = i40e_get_eeprom_length,
        .get_eeprom                   = i40e_get_eeprom,
        .mac_addr_set                 = i40e_set_default_mac_addr,
        .mtu_set                      = i40e_dev_mtu_set,
        .tm_ops_get                   = i40e_tm_ops_get,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
        {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
        {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
        {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
        {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
        {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
                rx_unknown_protocol)},
        {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
        {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
        {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
        {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
                sizeof(rte_i40e_stats_strings[0]))
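
/*
 * Editor's note: an illustrative, compiled-out sketch of how these
 * name/offset tables are consumed when extended statistics are read;
 * the driver's i40e_dev_xstats_get() follows the same
 * pointer-plus-offset pattern. The helper name is hypothetical.
 */
#if 0
static uint64_t
example_read_eth_xstat(const struct i40e_eth_stats *stats, unsigned int i)
{
        /* Resolve the i-th named counter to its byte offset in the
         * stats structure and read it as a 64-bit value.
         */
        return *(const uint64_t *)(((const char *)stats) +
                                   rte_i40e_stats_strings[i].offset);
}
#endif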

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
        {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
                tx_dropped_link_down)},
        {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
        {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
                illegal_bytes)},
        {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
        {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
                mac_local_faults)},
        {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
                mac_remote_faults)},
        {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
                rx_length_errors)},
        {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
        {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
        {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
        {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
        {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
        {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_127)},
        {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_255)},
        {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_511)},
        {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1023)},
        {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1522)},
        {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_big)},
        {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
                rx_undersize)},
        {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
                rx_oversize)},
        {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
                mac_short_packet_dropped)},
        {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
                rx_fragments)},
        {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
        {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
        {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_127)},
        {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_255)},
        {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_511)},
        {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1023)},
        {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1522)},
        {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_big)},
        {"rx_flow_director_atr_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_atr_match)},
        {"rx_flow_director_sb_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_sb_match)},
        {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                tx_lpi_status)},
        {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                rx_lpi_status)},
        {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                tx_lpi_count)},
        {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
                sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_rx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
                sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_tx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_tx)},
        {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
                sizeof(rte_i40e_txq_prio_strings[0]))

static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct i40e_adapter), eth_i40e_dev_init);
}

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
        .id_table = pci_id_i40e_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_i40e_pci_probe,
        .remove = eth_i40e_pci_remove,
};

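/*
 * Editor's note on the two helpers below: struct rte_eth_link fits in
 * 64 bits, so a single rte_atomic64_cmpset() copies the whole link
 * status atomically. The cmpset can only fail (returning -1 here) if
 * another thread updated the destination concurrently, in which case
 * the caller may simply retry.
 */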
static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                     struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                      struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
        /*
         * Initialize registers for flexible payload; these should be set
         * by the NVM. This code should be removed once the issue is fixed
         * in the NVM.
         */
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
        I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
        I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);

        /* Initialize registers for parsing the packet type of QinQ */
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
        I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
        int ret;

        ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
                                I40E_FLOW_CONTROL_ETHERTYPE, flags,
                                pf->main_vsi_seid, 0,
                                TRUE, NULL, NULL);
        if (ret)
                PMD_INIT_LOG(ERR,
                        "Failed to add filter to drop flow control frames from VSIs.");
}

static int
floating_veb_list_handler(__rte_unused const char *key,
                          const char *floating_veb_value,
                          void *opaque)
{
        int idx = 0;
        unsigned int count = 0;
        char *end = NULL;
        int min, max;
        bool *vf_floating_veb = opaque;

        while (isblank(*floating_veb_value))
                floating_veb_value++;

        /* Reset floating VEB configuration for VFs */
        for (idx = 0; idx < I40E_MAX_VF; idx++)
                vf_floating_veb[idx] = false;

        min = I40E_MAX_VF;
        do {
                while (isblank(*floating_veb_value))
                        floating_veb_value++;
                if (*floating_veb_value == '\0')
                        return -1;
                errno = 0;
                idx = strtoul(floating_veb_value, &end, 10);
                if (errno || end == NULL)
                        return -1;
                while (isblank(*end))
                        end++;
                if (*end == '-') {
                        min = idx;
                } else if ((*end == ';') || (*end == '\0')) {
                        max = idx;
                        if (min == I40E_MAX_VF)
                                min = idx;
                        if (max >= I40E_MAX_VF)
                                max = I40E_MAX_VF - 1;
                        for (idx = min; idx <= max; idx++) {
                                vf_floating_veb[idx] = true;
                                count++;
                        }
                        min = I40E_MAX_VF;
                } else {
                        return -1;
                }
                floating_veb_value = end + 1;
        } while (*end != '\0');

        if (count == 0)
                return -1;

        return 0;
}
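
/*
 * Editor's note: with the parser above, a floating_veb_list value such as
 * "1;3-5" marks VF 1 and VFs 3 through 5 for attachment to the floating
 * VEB; entries are separated by ';' and a range is written with '-'.
 */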

static void
config_vf_floating_veb(struct rte_devargs *devargs,
                       uint16_t floating_veb,
                       bool *vf_floating_veb)
{
        struct rte_kvargs *kvlist;
        int i;
        const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

        if (!floating_veb)
                return;
        /* All the VFs attach to the floating VEB by default
         * when the floating VEB is enabled.
         */
        for (i = 0; i < I40E_MAX_VF; i++)
                vf_floating_veb[i] = true;

        if (devargs == NULL)
                return;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return;

        if (!rte_kvargs_count(kvlist, floating_veb_list)) {
                rte_kvargs_free(kvlist);
                return;
        }
        /* When the floating_veb_list parameter exists, all the VFs
         * first attach to the legacy VEB; then the VFs named in
         * floating_veb_list are moved to the floating VEB.
         */
        if (rte_kvargs_process(kvlist, floating_veb_list,
                               floating_veb_list_handler,
                               vf_floating_veb) < 0) {
                rte_kvargs_free(kvlist);
                return;
        }
        rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
                            const char *value,
                            __rte_unused void *opaque)
{
        if (strcmp(value, "1"))
                return -1;

        return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, floating_veb_key)) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        /* Floating VEB is enabled when the key-value pair
         * enable_floating_veb=1 is present.
         */
        if (rte_kvargs_process(kvlist, floating_veb_key,
                               i40e_check_floating_handler, NULL) < 0) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        rte_kvargs_free(kvlist);

        return 1;
}
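
/*
 * Editor's note, a hypothetical EAL invocation tying the two devargs keys
 * together (the device address is illustrative):
 *     -w 0000:84:00.0,enable_floating_veb=1,floating_veb_list=1;3-5
 * This enables the floating VEB on that port and attaches VF 1 and
 * VFs 3-5 to it.
 */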

static void
config_floating_veb(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

        if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
                pf->floating_veb =
                        is_floating_veb_supported(pci_dev->device.devargs);
                config_vf_floating_veb(pci_dev->device.devargs,
                                       pf->floating_veb,
                                       pf->floating_veb_list);
        } else {
                pf->floating_veb = false;
        }
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
        char ethertype_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters ethertype_hash_params = {
                .name = ethertype_hash_name,
                .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
                .key_len = sizeof(struct i40e_ethertype_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize ethertype filter rule list and hash */
        TAILQ_INIT(&ethertype_rule->ethertype_list);
        snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
                 "ethertype_%s", dev->device->name);
        ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
        if (!ethertype_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
                return -EINVAL;
        }
        ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
                                       sizeof(struct i40e_ethertype_filter *) *
                                       I40E_MAX_ETHERTYPE_FILTER_NUM,
                                       0);
        if (!ethertype_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for ethertype hash map!");
                ret = -ENOMEM;
                goto err_ethertype_hash_map_alloc;
        }

        return 0;

err_ethertype_hash_map_alloc:
        rte_hash_free(ethertype_rule->hash_table);

        return ret;
}
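
/*
 * Editor's note: a compiled-out sketch of how each rule list above pairs
 * an rte_hash with a flat hash_map array. Insertion hashes the filter's
 * input key to obtain a slot index and records the filter pointer in that
 * slot, mirroring the driver's i40e_sw_ethertype_filter_insert(). The
 * helper name is hypothetical.
 */
#if 0
static int
example_ethertype_insert(struct i40e_ethertype_rule *rule,
                         struct i40e_ethertype_filter *filter)
{
        int ret;

        /* rte_hash_add_key() returns the slot index on success. */
        ret = rte_hash_add_key(rule->hash_table, &filter->input);
        if (ret < 0)
                return ret;
        rule->hash_map[ret] = filter;
        TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
        return 0;
}
#endif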

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
        char tunnel_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters tunnel_hash_params = {
                .name = tunnel_hash_name,
                .entries = I40E_MAX_TUNNEL_FILTER_NUM,
                .key_len = sizeof(struct i40e_tunnel_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize tunnel filter rule list and hash */
        TAILQ_INIT(&tunnel_rule->tunnel_list);
        snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
                 "tunnel_%s", dev->device->name);
        tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
        if (!tunnel_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
                return -EINVAL;
        }
        tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
                                    sizeof(struct i40e_tunnel_filter *) *
                                    I40E_MAX_TUNNEL_FILTER_NUM,
                                    0);
        if (!tunnel_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for tunnel hash map!");
                ret = -ENOMEM;
                goto err_tunnel_hash_map_alloc;
        }

        return 0;

err_tunnel_hash_map_alloc:
        rte_hash_free(tunnel_rule->hash_table);

        return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = I40E_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct rte_eth_fdir_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize flow director filter rule list and hash */
        TAILQ_INIT(&fdir_info->fdir_list);
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
                                          sizeof(struct i40e_fdir_filter *) *
                                          I40E_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev;
        struct rte_intr_handle *intr_handle;
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi;
        int ret;
        uint32_t len;
        uint8_t aq_fail = 0;

        PMD_INIT_FUNC_TRACE();

        dev->dev_ops = &i40e_eth_dev_ops;
        dev->rx_pkt_burst = i40e_recv_pkts;
        dev->tx_pkt_burst = i40e_xmit_pkts;
        dev->tx_pkt_prepare = i40e_prep_pkts;

        /* For secondary processes, we don't initialise any further, as the
         * primary has already done this work. Only check that we don't need
         * a different RX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                i40e_set_rx_function(dev);
                i40e_set_tx_function(dev);
                return 0;
        }
        i40e_set_default_ptype_table(dev);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        intr_handle = &pci_dev->intr_handle;

        rte_eth_copy_pci_info(dev, pci_dev);
        dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

        pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        pf->adapter->eth_dev = dev;
        pf->dev_data = dev->data;

        hw->back = I40E_PF_TO_ADAPTER(pf);
        hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
        if (!hw->hw_addr) {
                PMD_INIT_LOG(ERR,
                        "Hardware is not available, as address is NULL");
                return -ENODEV;
        }

        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->bus.device = pci_dev->addr.devid;
        hw->bus.func = pci_dev->addr.function;
        hw->adapter_stopped = 0;

        /* Make sure all is clean before doing PF reset */
        i40e_clear_hw(hw);

        /* Initialize the hardware */
        i40e_hw_init(dev);

        /* Reset here to make sure all is clean for each PF */
        ret = i40e_pf_reset(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
                return ret;
        }

        /* Initialize the shared code (base driver) */
        ret = i40e_init_shared_code(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
                return ret;
        }

        /*
         * To work around the NVM issue, initialize the registers for
         * flexible payload and the QinQ packet type in software. This
         * should be removed once the issues are fixed in the NVM.
         */
        i40e_GLQF_reg_init(hw);

        /* Initialize the input set for filters (hash and fd) to default values */
        i40e_filter_input_set_init(pf);

        /* Initialize the parameters for adminq */
        i40e_init_adminq_parameter(hw);
        ret = i40e_init_adminq(hw);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
                return -EIO;
        }
        PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
                     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
                     hw->aq.api_maj_ver, hw->aq.api_min_ver,
                     ((hw->nvm.version >> 12) & 0xf),
                     ((hw->nvm.version >> 4) & 0xff),
                     (hw->nvm.version & 0xf), hw->nvm.eetrack);
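        /*
         * Editor's note: hw->nvm.version packs major/minor/patch into
         * 4/8/4 bits, so e.g. a raw value of 0x6120 is printed above as
         * NVM 06.18.00 ((0x6120 >> 12) & 0xf = 6,
         * (0x6120 >> 4) & 0xff = 0x12 = 18, 0x6120 & 0xf = 0).
         */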
1135
1136         /* initialise the L3_MAP register */
1137         ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
1138                                    0x00000028,  NULL);
1139         if (ret)
1140                 PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret);
1141
1142         /* Need the special FW version to support floating VEB */
1143         config_floating_veb(dev);
1144         /* Clear PXE mode */
1145         i40e_clear_pxe_mode(hw);
1146         i40e_dev_sync_phy_type(hw);
1147
1148         /*
1149          * On X710, performance number is far from the expectation on recent
1150          * firmware versions. The fix for this issue may not be integrated in
1151          * the following firmware version. So the workaround in software driver
1152          * is needed. It needs to modify the initial values of 3 internal only
1153          * registers. Note that the workaround can be removed when it is fixed
1154          * in firmware in the future.
1155          */
1156         i40e_configure_registers(hw);
1157
1158         /* Get hw capabilities */
1159         ret = i40e_get_cap(hw);
1160         if (ret != I40E_SUCCESS) {
1161                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1162                 goto err_get_capabilities;
1163         }
1164
1165         /* Initialize parameters for PF */
1166         ret = i40e_pf_parameter_init(dev);
1167         if (ret != 0) {
1168                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1169                 goto err_parameter_init;
1170         }
1171
1172         /* Initialize the queue management */
1173         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1174         if (ret < 0) {
1175                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1176                 goto err_qp_pool_init;
1177         }
1178         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1179                                 hw->func_caps.num_msix_vectors - 1);
1180         if (ret < 0) {
1181                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1182                 goto err_msix_pool_init;
1183         }
1184
1185         /* Initialize lan hmc */
1186         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1187                                 hw->func_caps.num_rx_qp, 0, 0);
1188         if (ret != I40E_SUCCESS) {
1189                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1190                 goto err_init_lan_hmc;
1191         }
1192
1193         /* Configure lan hmc */
1194         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1195         if (ret != I40E_SUCCESS) {
1196                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1197                 goto err_configure_lan_hmc;
1198         }
1199
1200         /* Get and check the mac address */
1201         i40e_get_mac_addr(hw, hw->mac.addr);
1202         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1203                 PMD_INIT_LOG(ERR, "MAC address is not valid");
1204                 ret = -EIO;
1205                 goto err_get_mac_addr;
1206         }
1207         /* Copy the permanent MAC address */
1208         ether_addr_copy((struct ether_addr *) hw->mac.addr,
1209                         (struct ether_addr *) hw->mac.perm_addr);
1210
1211         /* Disable flow control */
1212         hw->fc.requested_mode = I40E_FC_NONE;
1213         i40e_set_fc(hw, &aq_fail, TRUE);
1214
1215         /* Set the global registers with default ether type value */
1216         ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
1217         if (ret != I40E_SUCCESS) {
1218                 PMD_INIT_LOG(ERR,
1219                         "Failed to set the default outer VLAN ether type");
1220                 goto err_setup_pf_switch;
1221         }
1222
1223         /* PF setup, which includes VSI setup */
1224         ret = i40e_pf_setup(pf);
1225         if (ret) {
1226                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1227                 goto err_setup_pf_switch;
1228         }
1229
1230         /* reset all stats of the device, including pf and main vsi */
1231         i40e_dev_stats_reset(dev);
1232
1233         vsi = pf->main_vsi;
1234
1235         /* Disable double vlan by default */
1236         i40e_vsi_config_double_vlan(vsi, FALSE);
1237
1238         /* Disable S-TAG identification when floating_veb is disabled */
1239         if (!pf->floating_veb) {
1240                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1241                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1242                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1243                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1244                 }
1245         }
1246
1247         if (!vsi->max_macaddrs)
1248                 len = ETHER_ADDR_LEN;
1249         else
1250                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1251
1252         /* Should be after VSI initialized */
1253         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1254         if (!dev->data->mac_addrs) {
1255                 PMD_INIT_LOG(ERR,
1256                         "Failed to allocate memory for storing MAC addresses");
1257                 goto err_mac_alloc;
1258         }
1259         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1260                                         &dev->data->mac_addrs[0]);
1261
1262         /* Init dcb to sw mode by default */
1263         ret = i40e_dcb_init_configure(dev, TRUE);
1264         if (ret != I40E_SUCCESS) {
1265                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1266                 pf->flags &= ~I40E_FLAG_DCB;
1267         }
1268         /* Update HW struct after DCB configuration */
1269         i40e_get_cap(hw);
1270
1271         /* initialize pf host driver to set up SRIOV resources if applicable */
1272         i40e_pf_host_init(dev);
1273
1274         /* register callback func to eal lib */
1275         rte_intr_callback_register(intr_handle,
1276                                    i40e_dev_interrupt_handler, dev);
1277
1278         /* configure and enable device interrupt */
1279         i40e_pf_config_irq0(hw, TRUE);
1280         i40e_pf_enable_irq0(hw);
1281
1282         /* enable uio intr after callback register */
1283         rte_intr_enable(intr_handle);
1284         /*
1285          * Add an ethertype filter to drop all flow control frames
1286          * transmitted from VSIs. By doing so, we stop VFs from sending
1287          * PAUSE or PFC frames to the wire.
1288          */
1289         i40e_add_tx_flow_control_drop_filter(pf);
1290
1291         /* Set the max frame size to 0x2600 by default,
1292          * in case other drivers changed the default value.
1293          */
1294         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1295
1296         /* initialize mirror rule list */
1297         TAILQ_INIT(&pf->mirror_list);
1298
1299         ret = i40e_init_ethtype_filter_list(dev);
1300         if (ret < 0)
1301                 goto err_init_ethtype_filter_list;
1302         ret = i40e_init_tunnel_filter_list(dev);
1303         if (ret < 0)
1304                 goto err_init_tunnel_filter_list;
1305         ret = i40e_init_fdir_filter_list(dev);
1306         if (ret < 0)
1307                 goto err_init_fdir_filter_list;
1308
1309         return 0;
1310
1311 err_init_fdir_filter_list:
1312         rte_free(pf->tunnel.hash_table);
1313         rte_free(pf->tunnel.hash_map);
1314 err_init_tunnel_filter_list:
1315         rte_free(pf->ethertype.hash_table);
1316         rte_free(pf->ethertype.hash_map);
1317 err_init_ethtype_filter_list:
1318         rte_free(dev->data->mac_addrs);
1319 err_mac_alloc:
1320         i40e_vsi_release(pf->main_vsi);
1321 err_setup_pf_switch:
1322 err_get_mac_addr:
1323 err_configure_lan_hmc:
1324         (void)i40e_shutdown_lan_hmc(hw);
1325 err_init_lan_hmc:
1326         i40e_res_pool_destroy(&pf->msix_pool);
1327 err_msix_pool_init:
1328         i40e_res_pool_destroy(&pf->qp_pool);
1329 err_qp_pool_init:
1330 err_parameter_init:
1331 err_get_capabilities:
1332         (void)i40e_shutdown_adminq(hw);
1333
1334         return ret;
1335 }
1336
1337 static void
1338 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1339 {
1340         struct i40e_ethertype_filter *p_ethertype;
1341         struct i40e_ethertype_rule *ethertype_rule;
1342
1343         ethertype_rule = &pf->ethertype;
1344         /* Remove all ethertype filter rules and hash */
1345         if (ethertype_rule->hash_map)
1346                 rte_free(ethertype_rule->hash_map);
1347         if (ethertype_rule->hash_table)
1348                 rte_hash_free(ethertype_rule->hash_table);
1349
1350         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1351                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1352                              p_ethertype, rules);
1353                 rte_free(p_ethertype);
1354         }
1355 }
1356
1357 static void
1358 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1359 {
1360         struct i40e_tunnel_filter *p_tunnel;
1361         struct i40e_tunnel_rule *tunnel_rule;
1362
1363         tunnel_rule = &pf->tunnel;
1364         /* Remove all tunnel filter rules and hash */
1365         if (tunnel_rule->hash_map)
1366                 rte_free(tunnel_rule->hash_map);
1367         if (tunnel_rule->hash_table)
1368                 rte_hash_free(tunnel_rule->hash_table);
1369
1370         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1371                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1372                 rte_free(p_tunnel);
1373         }
1374 }
1375
1376 static void
1377 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1378 {
1379         struct i40e_fdir_filter *p_fdir;
1380         struct i40e_fdir_info *fdir_info;
1381
1382         fdir_info = &pf->fdir;
1383         /* Remove all flow director rules and hash */
1384         if (fdir_info->hash_map)
1385                 rte_free(fdir_info->hash_map);
1386         if (fdir_info->hash_table)
1387                 rte_hash_free(fdir_info->hash_table);
1388
1389         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1390                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1391                 rte_free(p_fdir);
1392         }
1393 }
1394
1395 static int
1396 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1397 {
1398         struct i40e_pf *pf;
1399         struct rte_pci_device *pci_dev;
1400         struct rte_intr_handle *intr_handle;
1401         struct i40e_hw *hw;
1402         struct i40e_filter_control_settings settings;
1403         struct rte_flow *p_flow;
1404         int ret;
1405         uint8_t aq_fail = 0;
1406
1407         PMD_INIT_FUNC_TRACE();
1408
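        /* Only the primary process owns the device state; a secondary
         * process must not release resources shared with the primary.
         */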
1409         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1410                 return 0;
1411
1412         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1413         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1414         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1415         intr_handle = &pci_dev->intr_handle;
1416
1417         if (hw->adapter_stopped == 0)
1418                 i40e_dev_close(dev);
1419
1420         dev->dev_ops = NULL;
1421         dev->rx_pkt_burst = NULL;
1422         dev->tx_pkt_burst = NULL;
1423
1424         /* Clear PXE mode */
1425         i40e_clear_pxe_mode(hw);
1426
1427         /* Unconfigure filter control */
1428         memset(&settings, 0, sizeof(settings));
1429         ret = i40e_set_filter_control(hw, &settings);
1430         if (ret)
1431                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1432                                         ret);
1433
1434         /* Disable flow control */
1435         hw->fc.requested_mode = I40E_FC_NONE;
1436         i40e_set_fc(hw, &aq_fail, TRUE);
1437
1438         /* uninitialize pf host driver */
1439         i40e_pf_host_uninit(dev);
1440
1441         rte_free(dev->data->mac_addrs);
1442         dev->data->mac_addrs = NULL;
1443
1444         /* disable uio intr before callback unregister */
1445         rte_intr_disable(intr_handle);
1446
1447         /* unregister callback func from eal lib */
1448         rte_intr_callback_unregister(intr_handle,
1449                                      i40e_dev_interrupt_handler, dev);
1450
1451         i40e_rm_ethtype_filter_list(pf);
1452         i40e_rm_tunnel_filter_list(pf);
1453         i40e_rm_fdir_filter_list(pf);
1454
1455         /* Remove all flows */
1456         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
1457                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
1458                 rte_free(p_flow);
1459         }
1460
1461         return 0;
1462 }
1463
1464 static int
1465 i40e_dev_configure(struct rte_eth_dev *dev)
1466 {
1467         struct i40e_adapter *ad =
1468                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1469         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1470         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1471         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1472         int i, ret;
1473
1474         ret = i40e_dev_sync_phy_type(hw);
1475         if (ret)
1476                 return ret;
1477
1478         /* Initialize to TRUE. If any Rx queue fails to meet the bulk
1479          * allocation or vector Rx preconditions, the flag will be reset.
1480          */
1481         ad->rx_bulk_alloc_allowed = true;
1482         ad->rx_vec_allowed = true;
1483         ad->tx_simple_allowed = true;
1484         ad->tx_vec_allowed = true;
1485
1486         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1487                 ret = i40e_fdir_setup(pf);
1488                 if (ret != I40E_SUCCESS) {
1489                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1490                         return -ENOTSUP;
1491                 }
1492                 ret = i40e_fdir_configure(dev);
1493                 if (ret < 0) {
1494                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1495                         goto err;
1496                 }
1497         } else
1498                 i40e_fdir_teardown(pf);
1499
1500         ret = i40e_dev_init_vlan(dev);
1501         if (ret < 0)
1502                 goto err;
1503
1504         /* VMDQ setup.
1505          *  VMDQ setup needs to be moved out of i40e_pf_config_mq_rx(), as
1506          *  VMDQ and RSS settings have different requirements.
1507          *  The general PMD call sequence is NIC init, configure,
1508          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up
1509          *  the VSI a specific queue belongs to when VMDQ applies, so VMDQ
1510          *  setup has to be done before rx/tx_queue_setup(), which makes
1511          *  this function a good place for vmdq_setup().
1512          *  RSS setup needs the actual number of configured RX queues,
1513          *  which is only available after rx_queue_setup(), so dev_start()
1514          *  is a good place for RSS setup.
1515          */
1516         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1517                 ret = i40e_vmdq_setup(dev);
1518                 if (ret)
1519                         goto err;
1520         }
1521
1522         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1523                 ret = i40e_dcb_setup(dev);
1524                 if (ret) {
1525                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1526                         goto err_dcb;
1527                 }
1528         }
1529
1530         TAILQ_INIT(&pf->flow_list);
1531
1532         return 0;
1533
1534 err_dcb:
1535         /* release VMDQ resources if any exist */
1536         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1537                 i40e_vsi_release(pf->vmdq[i].vsi);
1538                 pf->vmdq[i].vsi = NULL;
1539         }
1540         rte_free(pf->vmdq);
1541         pf->vmdq = NULL;
1542 err:
1543         /* release flow director resources if any exist */
1544         i40e_fdir_teardown(pf);
1545         return ret;
1546 }
1547
1548 void
1549 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1550 {
1551         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1552         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1553         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1554         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1555         uint16_t msix_vect = vsi->msix_intr;
1556         uint16_t i;
1557
1558         for (i = 0; i < vsi->nb_qps; i++) {
1559                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1560                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1561                 rte_wmb();
1562         }
1563
1564         if (vsi->type != I40E_VSI_SRIOV) {
1565                 if (!rte_intr_allow_others(intr_handle)) {
1566                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1567                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1568                         I40E_WRITE_REG(hw,
1569                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1570                                        0);
1571                 } else {
1572                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1573                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1574                         I40E_WRITE_REG(hw,
1575                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1576                                                        msix_vect - 1), 0);
1577                 }
1578         } else {
1579                 uint32_t reg;
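                /* VPINT_LNKLSTN registers are indexed per VF data vector:
                 * each VF owns (num_msix_vectors_vf - 1) vectors, vector 0
                 * being the VF misc interrupt, and user_param holds the VF
                 * index for SRIOV VSIs.
                 */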
1580                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1581                         vsi->user_param + (msix_vect - 1);
1582
1583                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1584                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1585         }
1586         I40E_WRITE_FLUSH(hw);
1587 }
1588
1589 static void
1590 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1591                        int base_queue, int nb_queue)
1592 {
1593         int i;
1594         uint32_t val;
1595         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1596
1597         /* Bind all RX queues to allocated MSIX interrupt */
1598         for (i = 0; i < nb_queue; i++) {
1599                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1600                         I40E_QINT_RQCTL_ITR_INDX_MASK |
1601                         ((base_queue + i + 1) <<
1602                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1603                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1604                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1605
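                /* Queues bound to this vector form a hardware linked list:
                 * NEXTQ_INDX in each RQCTL entry points at the next queue,
                 * and the last entry is terminated with NEXTQ_INDX_MASK
                 * (the end-of-list marker).
                 */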
1606                 if (i == nb_queue - 1)
1607                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1608                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1609         }
1610
1611         /* Write the first RX queue to the link list register as the head element */
1612         if (vsi->type != I40E_VSI_SRIOV) {
1613                 uint16_t interval =
1614                         i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
1615
1616                 if (msix_vect == I40E_MISC_VEC_ID) {
1617                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1618                                        (base_queue <<
1619                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1620                                        (0x0 <<
1621                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1622                         I40E_WRITE_REG(hw,
1623                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1624                                        interval);
1625                 } else {
1626                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1627                                        (base_queue <<
1628                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1629                                        (0x0 <<
1630                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1631                         I40E_WRITE_REG(hw,
1632                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1633                                                        msix_vect - 1),
1634                                        interval);
1635                 }
1636         } else {
1637                 uint32_t reg;
1638
1639                 if (msix_vect == I40E_MISC_VEC_ID) {
1640                         I40E_WRITE_REG(hw,
1641                                        I40E_VPINT_LNKLST0(vsi->user_param),
1642                                        (base_queue <<
1643                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1644                                        (0x0 <<
1645                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1646                 } else {
1647                         /* num_msix_vectors_vf minus one to exclude irq0 */
1648                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1649                                 vsi->user_param + (msix_vect - 1);
1650
1651                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1652                                        (base_queue <<
1653                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1654                                        (0x0 <<
1655                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1656                 }
1657         }
1658
1659         I40E_WRITE_FLUSH(hw);
1660 }
1661
1662 void
1663 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
1664 {
1665         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1666         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1667         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1668         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1669         uint16_t msix_vect = vsi->msix_intr;
1670         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1671         uint16_t queue_idx = 0;
1672         int record = 0;
1673         uint32_t val;
1674         int i;
1675
1676         for (i = 0; i < vsi->nb_qps; i++) {
1677                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1678                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1679         }
1680
1681         /* Disable interrupt auto-mask so the INTENA flag is not auto-cleared */
1682         val = I40E_READ_REG(hw, I40E_GLINT_CTL);
1683         val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
1684                 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
1685                 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
1686         I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
1687
1688         /* VF bind interrupt */
1689         if (vsi->type == I40E_VSI_SRIOV) {
1690                 __vsi_queues_bind_intr(vsi, msix_vect,
1691                                        vsi->base_queue, vsi->nb_qps);
1692                 return;
1693         }
1694
1695         /* PF & VMDq bind interrupt */
1696         if (rte_intr_dp_is_en(intr_handle)) {
1697                 if (vsi->type == I40E_VSI_MAIN) {
1698                         queue_idx = 0;
1699                         record = 1;
1700                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1701                         struct i40e_vsi *main_vsi =
1702                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1703                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1704                         record = 1;
1705                 }
1706         }
1707
1708         for (i = 0; i < vsi->nb_used_qps; i++) {
1709                 if (nb_msix <= 1) {
1710                         if (!rte_intr_allow_others(intr_handle))
1711                                 /* allow sharing of MISC_VEC_ID */
1712                                 msix_vect = I40E_MISC_VEC_ID;
1713
1714                         /* not enough MSI-X vectors, map all queues to one */
1715                         __vsi_queues_bind_intr(vsi, msix_vect,
1716                                                vsi->base_queue + i,
1717                                                vsi->nb_used_qps - i);
1718                         for (; !!record && i < vsi->nb_used_qps; i++)
1719                                 intr_handle->intr_vec[queue_idx + i] =
1720                                         msix_vect;
1721                         break;
1722                 }
1723                 /* 1:1 queue/msix_vect mapping */
1724                 __vsi_queues_bind_intr(vsi, msix_vect,
1725                                        vsi->base_queue + i, 1);
1726                 if (!!record)
1727                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1728
1729                 msix_vect++;
1730                 nb_msix--;
1731         }
1732 }
1733
1734 static void
1735 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1736 {
1737         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1738         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1739         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1740         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
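        /* ITR registers are programmed in 2 usec units;
         * i40e_calc_itr_interval() converts the microsecond setting
         * accordingly.
         */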
1741         uint16_t interval = i40e_calc_itr_interval(\
1742                 RTE_LIBRTE_I40E_ITR_INTERVAL);
1743         uint16_t msix_intr, i;
1744
1745         if (rte_intr_allow_others(intr_handle))
1746                 for (i = 0; i < vsi->nb_msix; i++) {
1747                         msix_intr = vsi->msix_intr + i;
1748                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1749                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1750                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1751                                 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1752                                 (interval <<
1753                                  I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
1754                 }
1755         else
1756                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1757                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1758                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1759                                (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
1760                                (interval <<
1761                                 I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
1762
1763         I40E_WRITE_FLUSH(hw);
1764 }
1765
1766 static void
1767 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1768 {
1769         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1770         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1771         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1772         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1773         uint16_t msix_intr, i;
1774
1775         if (rte_intr_allow_others(intr_handle))
1776                 for (i = 0; i < vsi->nb_msix; i++) {
1777                         msix_intr = vsi->msix_intr + i;
1778                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1779                                        0);
1780                 }
1781         else
1782                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
1783
1784         I40E_WRITE_FLUSH(hw);
1785 }
1786
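/* Translate an rte_eth link_speeds bitmap into an AQ link speed bitmap;
 * several bits may be set when multiple speeds are advertised.
 */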
1787 static inline uint8_t
1788 i40e_parse_link_speeds(uint16_t link_speeds)
1789 {
1790         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1791
1792         if (link_speeds & ETH_LINK_SPEED_40G)
1793                 link_speed |= I40E_LINK_SPEED_40GB;
1794         if (link_speeds & ETH_LINK_SPEED_25G)
1795                 link_speed |= I40E_LINK_SPEED_25GB;
1796         if (link_speeds & ETH_LINK_SPEED_20G)
1797                 link_speed |= I40E_LINK_SPEED_20GB;
1798         if (link_speeds & ETH_LINK_SPEED_10G)
1799                 link_speed |= I40E_LINK_SPEED_10GB;
1800         if (link_speeds & ETH_LINK_SPEED_1G)
1801                 link_speed |= I40E_LINK_SPEED_1GB;
1802         if (link_speeds & ETH_LINK_SPEED_100M)
1803                 link_speed |= I40E_LINK_SPEED_100MB;
1804
1805         return link_speed;
1806 }
1807
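/* Apply a PHY configuration: pause and low-power flags (bits 0-2) are
 * carried over from the current abilities, and the speed is forced
 * unless autonegotiation is enabled, in which case the full
 * advertised-speed set (advt) is used.
 */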
1808 static int
1809 i40e_phy_conf_link(struct i40e_hw *hw,
1810                    uint8_t abilities,
1811                    uint8_t force_speed)
1812 {
1813         enum i40e_status_code status;
1814         struct i40e_aq_get_phy_abilities_resp phy_ab;
1815         struct i40e_aq_set_phy_config phy_conf;
1816         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
1817                         I40E_AQ_PHY_FLAG_PAUSE_RX |
1818                         I40E_AQ_PHY_FLAG_LOW_POWER;
1820         const uint8_t advt = I40E_LINK_SPEED_40GB |
1821                         I40E_LINK_SPEED_25GB |
1822                         I40E_LINK_SPEED_10GB |
1823                         I40E_LINK_SPEED_1GB |
1824                         I40E_LINK_SPEED_100MB;
1825         int ret = -ENOTSUP;
1826
1828         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
1829                                               NULL);
1830         if (status)
1831                 return ret;
1832
1833         memset(&phy_conf, 0, sizeof(phy_conf));
1834
1835         /* bits 0-2 use the values from get_phy_abilities_resp */
1836         abilities &= ~mask;
1837         abilities |= phy_ab.abilities & mask;
1838
1839         /* update abilities and speed */
1840         if (abilities & I40E_AQ_PHY_AN_ENABLED)
1841                 phy_conf.link_speed = advt;
1842         else
1843                 phy_conf.link_speed = force_speed;
1844
1845         phy_conf.abilities = abilities;
1846
1847         /* use get_phy_abilities_resp value for the rest */
1848         phy_conf.phy_type = phy_ab.phy_type;
1849         phy_conf.phy_type_ext = phy_ab.phy_type_ext;
1850         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
1851         phy_conf.eee_capability = phy_ab.eee_capability;
1852         phy_conf.eeer = phy_ab.eeer_val;
1853         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
1854
1855         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
1856                     phy_ab.abilities, phy_ab.link_speed);
1857         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
1858                     phy_conf.abilities, phy_conf.link_speed);
1859
1860         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
1861         if (status)
1862                 return ret;
1863
1864         return I40E_SUCCESS;
1865 }
1866
1867 static int
1868 i40e_apply_link_speed(struct rte_eth_dev *dev)
1869 {
1870         uint8_t speed;
1871         uint8_t abilities = 0;
1872         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1873         struct rte_eth_conf *conf = &dev->data->dev_conf;
1874
1875         speed = i40e_parse_link_speeds(conf->link_speeds);
1876         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1877         if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
1878                 abilities |= I40E_AQ_PHY_AN_ENABLED;
1879         abilities |= I40E_AQ_PHY_LINK_ENABLED;
1880
1881         /* Skip changing speed on 40G interfaces; FW does not support it */
1882         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
1883                 speed =  I40E_LINK_SPEED_UNKNOWN;
1884                 abilities |= I40E_AQ_PHY_AN_ENABLED;
1885         }
1886
1887         return i40e_phy_conf_link(hw, abilities, speed);
1888 }
1889
1890 static int
1891 i40e_dev_start(struct rte_eth_dev *dev)
1892 {
1893         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1894         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1895         struct i40e_vsi *main_vsi = pf->main_vsi;
1896         int ret, i;
1897         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1898         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1899         uint32_t intr_vector = 0;
1900         struct i40e_vsi *vsi;
1901
1902         hw->adapter_stopped = 0;
1903
1904         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1905                 PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; autonegotiation disabled",
1906                              dev->data->port_id);
1907                 return -EINVAL;
1908         }
1909
1910         rte_intr_disable(intr_handle);
1911
1912         if ((rte_intr_cap_multiple(intr_handle) ||
1913              !RTE_ETH_DEV_SRIOV(dev).active) &&
1914             dev->data->dev_conf.intr_conf.rxq != 0) {
1915                 intr_vector = dev->data->nb_rx_queues;
1916                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
1917                 if (ret)
1918                         return ret;
1919         }
1920
1921         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1922                 intr_handle->intr_vec =
1923                         rte_zmalloc("intr_vec",
1924                                     dev->data->nb_rx_queues * sizeof(int),
1925                                     0);
1926                 if (!intr_handle->intr_vec) {
1927                         PMD_INIT_LOG(ERR,
1928                                 "Failed to allocate %d rx_queues intr_vec",
1929                                 dev->data->nb_rx_queues);
1930                         return -ENOMEM;
1931                 }
1932         }
1933
1934         /* Initialize rx/tx queues of VSIs */
1935         ret = i40e_dev_rxtx_init(pf);
1936         if (ret != I40E_SUCCESS) {
1937                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
1938                 goto err_up;
1939         }
1940
1941         /* Map queues with MSIX interrupt */
1942         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
1943                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1944         i40e_vsi_queues_bind_intr(main_vsi);
1945         i40e_vsi_enable_queues_intr(main_vsi);
1946
1947         /* Map VMDQ VSI queues with MSIX interrupt */
1948         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1949                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1950                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
1951                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
1952         }
1953
1954         /* enable FDIR MSIX interrupt */
1955         if (pf->fdir.fdir_vsi) {
1956                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
1957                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
1958         }
1959
1960         /* Enable all queues which have been configured */
1961         ret = i40e_dev_switch_queues(pf, TRUE);
1962         if (ret != I40E_SUCCESS) {
1963                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
1964                 goto err_up;
1965         }
1966
1967         /* Enable receiving broadcast packets */
1968         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
1969         if (ret != I40E_SUCCESS)
1970                 PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
1971
1972         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1973                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
1974                                                 true, NULL);
1975                 if (ret != I40E_SUCCESS)
1976                         PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
1977         }
1978
1979         /* Enable the VLAN promiscuous mode. */
1980         if (pf->vfs) {
1981                 for (i = 0; i < pf->vf_num; i++) {
1982                         vsi = pf->vfs[i].vsi;
1983                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
1984                                                      true, NULL);
1985                 }
1986         }
1987
1988         /* Apply link configuration */
1989         if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
1990                                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
1991                                 ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
1992                                 ETH_LINK_SPEED_40G)) {
1993                 PMD_DRV_LOG(ERR, "Invalid link setting");
1994                 goto err_up;
1995         }
1996         ret = i40e_apply_link_speed(dev);
1997         if (I40E_SUCCESS != ret) {
1998                 PMD_DRV_LOG(ERR, "Failed to apply link setting");
1999                 goto err_up;
2000         }
2001
2002         if (!rte_intr_allow_others(intr_handle)) {
2003                 rte_intr_callback_unregister(intr_handle,
2004                                              i40e_dev_interrupt_handler,
2005                                              (void *)dev);
2006                 /* configure and enable device interrupt */
2007                 i40e_pf_config_irq0(hw, FALSE);
2008                 i40e_pf_enable_irq0(hw);
2009
2010                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2011                         PMD_INIT_LOG(INFO,
2012                                 "LSC won't be enabled: no interrupt multiplexing support");
2013         } else if (dev->data->dev_conf.intr_conf.lsc != 0) {
2014                 ret = i40e_aq_set_phy_int_mask(hw,
2015                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2016                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2017                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2018                 if (ret != I40E_SUCCESS)
2019                         PMD_DRV_LOG(WARNING, "Failed to set PHY mask");
2020
2021                 /* Call the get_link_info AQ command to enable LSE */
2022                 i40e_dev_link_update(dev, 0);
2023         }
2024
2025         /* enable uio intr after callback register */
2026         rte_intr_enable(intr_handle);
2027
2028         i40e_filter_restore(pf);
2029
2030         return I40E_SUCCESS;
2031
2032 err_up:
2033         i40e_dev_switch_queues(pf, FALSE);
2034         i40e_dev_clear_queues(dev);
2035
2036         return ret;
2037 }
2038
2039 static void
2040 i40e_dev_stop(struct rte_eth_dev *dev)
2041 {
2042         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2043         struct i40e_vsi *main_vsi = pf->main_vsi;
2044         struct i40e_mirror_rule *p_mirror;
2045         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2046         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2047         int i;
2048
2049         /* Disable all queues */
2050         i40e_dev_switch_queues(pf, FALSE);
2051
2052         /* un-map queues with interrupt registers */
2053         i40e_vsi_disable_queues_intr(main_vsi);
2054         i40e_vsi_queues_unbind_intr(main_vsi);
2055
2056         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2057                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2058                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2059         }
2060
2061         if (pf->fdir.fdir_vsi) {
2062                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2063                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2064         }
2065         /* Clear all queues and release memory */
2066         i40e_dev_clear_queues(dev);
2067
2068         /* Set link down */
2069         i40e_dev_set_link_down(dev);
2070
2071         /* Remove all mirror rules */
2072         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2073                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2074                 rte_free(p_mirror);
2075         }
2076         pf->nb_mirror_rule = 0;
2077
2078         if (!rte_intr_allow_others(intr_handle))
2079                 /* restore the default interrupt handler */
2080                 rte_intr_callback_register(intr_handle,
2081                                            i40e_dev_interrupt_handler,
2082                                            (void *)dev);
2083
2084         /* Clean datapath event and queue/vec mapping */
2085         rte_intr_efd_disable(intr_handle);
2086         if (intr_handle->intr_vec) {
2087                 rte_free(intr_handle->intr_vec);
2088                 intr_handle->intr_vec = NULL;
2089         }
2090 }
2091
2092 static void
2093 i40e_dev_close(struct rte_eth_dev *dev)
2094 {
2095         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2096         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2097         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2098         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2099         uint32_t reg;
2100         int i;
2101
2102         PMD_INIT_FUNC_TRACE();
2103
2104         i40e_dev_stop(dev);
2105         hw->adapter_stopped = 1;
2106         i40e_dev_free_queues(dev);
2107
2108         /* Disable interrupt */
2109         i40e_pf_disable_irq0(hw);
2110         rte_intr_disable(intr_handle);
2111
2112         /* shutdown and destroy the HMC */
2113         i40e_shutdown_lan_hmc(hw);
2114
2115         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2116                 i40e_vsi_release(pf->vmdq[i].vsi);
2117                 pf->vmdq[i].vsi = NULL;
2118         }
2119         rte_free(pf->vmdq);
2120         pf->vmdq = NULL;
2121
2122         /* release all the existing VSIs and VEBs */
2123         i40e_fdir_teardown(pf);
2124         i40e_vsi_release(pf->main_vsi);
2125
2126         /* shutdown the adminq */
2127         i40e_aq_queue_shutdown(hw, true);
2128         i40e_shutdown_adminq(hw);
2129
2130         i40e_res_pool_destroy(&pf->qp_pool);
2131         i40e_res_pool_destroy(&pf->msix_pool);
2132
2133         /* force a PF reset to clean anything leftover */
2134         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2135         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2136                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2137         I40E_WRITE_FLUSH(hw);
2138 }
2139
2140 static void
2141 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2142 {
2143         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2144         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2145         struct i40e_vsi *vsi = pf->main_vsi;
2146         int status;
2147
2148         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2149                                                      true, NULL, true);
2150         if (status != I40E_SUCCESS)
2151                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2152
2153         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2154                                                         TRUE, NULL);
2155         if (status != I40E_SUCCESS)
2156                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2158 }
2159
2160 static void
2161 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2162 {
2163         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2164         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2165         struct i40e_vsi *vsi = pf->main_vsi;
2166         int status;
2167
2168         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2169                                                      false, NULL, true);
2170         if (status != I40E_SUCCESS)
2171                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2172
2173         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2174                                                         false, NULL);
2175         if (status != I40E_SUCCESS)
2176                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2177 }
2178
2179 static void
2180 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2181 {
2182         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2183         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2184         struct i40e_vsi *vsi = pf->main_vsi;
2185         int ret;
2186
2187         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2188         if (ret != I40E_SUCCESS)
2189                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2190 }
2191
2192 static void
2193 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2194 {
2195         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2196         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2197         struct i40e_vsi *vsi = pf->main_vsi;
2198         int ret;
2199
2200         if (dev->data->promiscuous == 1)
2201                 return; /* must remain in all_multicast mode */
2202
2203         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2204                                 vsi->seid, FALSE, NULL);
2205         if (ret != I40E_SUCCESS)
2206                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2207 }
2208
2209 /*
2210  * Set device link up.
2211  */
2212 static int
2213 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2214 {
2215         /* re-apply link speed setting */
2216         return i40e_apply_link_speed(dev);
2217 }
2218
2219 /*
2220  * Set device link down.
2221  */
2222 static int
2223 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2224 {
2225         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2226         uint8_t abilities = 0;
2227         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2228
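        /* Leaving I40E_AQ_PHY_LINK_ENABLED out of the abilities field
         * brings the link down.
         */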
2229         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2230         return i40e_phy_conf_link(hw, abilities, speed);
2231 }
2232
2233 int
2234 i40e_dev_link_update(struct rte_eth_dev *dev,
2235                      int wait_to_complete)
2236 {
2237 #define CHECK_INTERVAL 100  /* 100ms */
2238 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
2239         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2240         struct i40e_link_status link_status;
2241         struct rte_eth_link link, old;
2242         int status;
2243         unsigned rep_cnt = MAX_REPEAT_TIME;
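        /* Request link status event (LSE) reporting from firmware only
         * when LSC interrupts are configured.
         */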
2244         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2245
2246         memset(&link, 0, sizeof(link));
2247         memset(&old, 0, sizeof(old));
2248         memset(&link_status, 0, sizeof(link_status));
2249         rte_i40e_dev_atomic_read_link_status(dev, &old);
2250
2251         do {
2252                 /* Get link status information from hardware */
2253                 status = i40e_aq_get_link_info(hw, enable_lse,
2254                                                 &link_status, NULL);
2255                 if (status != I40E_SUCCESS) {
2256                         link.link_speed = ETH_SPEED_NUM_100M;
2257                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2258                         PMD_DRV_LOG(ERR, "Failed to get link info");
2259                         goto out;
2260                 }
2261
2262                 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
2263                 if (!wait_to_complete || link.link_status)
2264                         break;
2265
2266                 rte_delay_ms(CHECK_INTERVAL);
2267         } while (--rep_cnt);
2268
2269         if (!link.link_status)
2270                 goto out;
2271
2272         /* i40e uses full duplex only */
2273         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2274
2275         /* Parse the link status */
2276         switch (link_status.link_speed) {
2277         case I40E_LINK_SPEED_100MB:
2278                 link.link_speed = ETH_SPEED_NUM_100M;
2279                 break;
2280         case I40E_LINK_SPEED_1GB:
2281                 link.link_speed = ETH_SPEED_NUM_1G;
2282                 break;
2283         case I40E_LINK_SPEED_10GB:
2284                 link.link_speed = ETH_SPEED_NUM_10G;
2285                 break;
2286         case I40E_LINK_SPEED_20GB:
2287                 link.link_speed = ETH_SPEED_NUM_20G;
2288                 break;
2289         case I40E_LINK_SPEED_25GB:
2290                 link.link_speed = ETH_SPEED_NUM_25G;
2291                 break;
2292         case I40E_LINK_SPEED_40GB:
2293                 link.link_speed = ETH_SPEED_NUM_40G;
2294                 break;
2295         default:
2296                 link.link_speed = ETH_SPEED_NUM_100M;
2297                 break;
2298         }
2299
2300         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2301                         ETH_LINK_SPEED_FIXED);
2302
2303 out:
2304         rte_i40e_dev_atomic_write_link_status(dev, &link);
2305         if (link.link_status == old.link_status)
2306                 return -1;
2307
2308         i40e_notify_all_vfs_link_status(dev);
2309
2310         return 0;
2311 }
2312
2313 /* Get all the statistics of a VSI */
2314 void
2315 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2316 {
2317         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2318         struct i40e_eth_stats *nes = &vsi->eth_stats;
2319         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
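        /* stat_counter_idx selects this VSI's counter block within the
         * per-VSI GLV_* statistics register arrays.
         */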
2320         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2321
2322         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2323                             vsi->offset_loaded, &oes->rx_bytes,
2324                             &nes->rx_bytes);
2325         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2326                             vsi->offset_loaded, &oes->rx_unicast,
2327                             &nes->rx_unicast);
2328         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2329                             vsi->offset_loaded, &oes->rx_multicast,
2330                             &nes->rx_multicast);
2331         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2332                             vsi->offset_loaded, &oes->rx_broadcast,
2333                             &nes->rx_broadcast);
2334         /* exclude CRC bytes */
2335         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2336                 nes->rx_broadcast) * ETHER_CRC_LEN;
2337
2338         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2339                             &oes->rx_discards, &nes->rx_discards);
2340         /* GLV_REPC not supported */
2341         /* GLV_RMPC not supported */
2342         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2343                             &oes->rx_unknown_protocol,
2344                             &nes->rx_unknown_protocol);
2345         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2346                             vsi->offset_loaded, &oes->tx_bytes,
2347                             &nes->tx_bytes);
2348         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2349                             vsi->offset_loaded, &oes->tx_unicast,
2350                             &nes->tx_unicast);
2351         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2352                             vsi->offset_loaded, &oes->tx_multicast,
2353                             &nes->tx_multicast);
2354         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2355                             vsi->offset_loaded,  &oes->tx_broadcast,
2356                             &nes->tx_broadcast);
2357         /* exclude CRC bytes */
2358         nes->tx_bytes -= (nes->tx_unicast + nes->tx_multicast +
2359                 nes->tx_broadcast) * ETHER_CRC_LEN;
2360         /* GLV_TDPC not supported */
2361         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2362                             &oes->tx_errors, &nes->tx_errors);
2363         vsi->offset_loaded = true;
2364
2365         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2366                     vsi->vsi_id);
2367         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2368         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2369         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2370         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2371         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2372         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2373                     nes->rx_unknown_protocol);
2374         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2375         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2376         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2377         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2378         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2379         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2380         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2381                     vsi->vsi_id);
2382 }
2383
2384 static void
2385 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2386 {
2387         unsigned int i;
2388         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2389         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
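        /* The hardware counters are free-running; the *_offset copies hold
         * the values captured at the last stats reset, and
         * i40e_stat_update_xx() reports the delta since that snapshot.
         */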
2390
2391         /* Get rx/tx bytes of internal transfer packets */
2392         i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2393                         I40E_GLV_GORCL(hw->port),
2394                         pf->offset_loaded,
2395                         &pf->internal_stats_offset.rx_bytes,
2396                         &pf->internal_stats.rx_bytes);
2397
2398         i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2399                         I40E_GLV_GOTCL(hw->port),
2400                         pf->offset_loaded,
2401                         &pf->internal_stats_offset.tx_bytes,
2402                         &pf->internal_stats.tx_bytes);
2403         /* Get total internal rx packet count */
2404         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
2405                             I40E_GLV_UPRCL(hw->port),
2406                             pf->offset_loaded,
2407                             &pf->internal_stats_offset.rx_unicast,
2408                             &pf->internal_stats.rx_unicast);
2409         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
2410                             I40E_GLV_MPRCL(hw->port),
2411                             pf->offset_loaded,
2412                             &pf->internal_stats_offset.rx_multicast,
2413                             &pf->internal_stats.rx_multicast);
2414         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
2415                             I40E_GLV_BPRCL(hw->port),
2416                             pf->offset_loaded,
2417                             &pf->internal_stats_offset.rx_broadcast,
2418                             &pf->internal_stats.rx_broadcast);
2419
2420         /* exclude CRC size */
2421         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
2422                 pf->internal_stats.rx_multicast +
2423                 pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
2424
2425         /* Get statistics of struct i40e_eth_stats */
2426         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2427                             I40E_GLPRT_GORCL(hw->port),
2428                             pf->offset_loaded, &os->eth.rx_bytes,
2429                             &ns->eth.rx_bytes);
2430         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2431                             I40E_GLPRT_UPRCL(hw->port),
2432                             pf->offset_loaded, &os->eth.rx_unicast,
2433                             &ns->eth.rx_unicast);
2434         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2435                             I40E_GLPRT_MPRCL(hw->port),
2436                             pf->offset_loaded, &os->eth.rx_multicast,
2437                             &ns->eth.rx_multicast);
2438         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2439                             I40E_GLPRT_BPRCL(hw->port),
2440                             pf->offset_loaded, &os->eth.rx_broadcast,
2441                             &ns->eth.rx_broadcast);
2442         /* Workaround: CRC size should not be included in byte statistics,
2443          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2444          */
2445         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2446                 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2447
2448         /* Workaround: it is possible that I40E_GLV_GORC[H/L] is updated
2449          * before I40E_GLPRT_GORC[H/L], so there is a small window that can
2450          * cause a negative value.
2451          */
2452         /* exclude internal rx bytes */
2453         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
2454                 ns->eth.rx_bytes = 0;
2455         else
2456                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
2457
2458         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2459                             pf->offset_loaded, &os->eth.rx_discards,
2460                             &ns->eth.rx_discards);
2461         /* GLPRT_REPC not supported */
2462         /* GLPRT_RMPC not supported */
2463         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2464                             pf->offset_loaded,
2465                             &os->eth.rx_unknown_protocol,
2466                             &ns->eth.rx_unknown_protocol);
2467         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2468                             I40E_GLPRT_GOTCL(hw->port),
2469                             pf->offset_loaded, &os->eth.tx_bytes,
2470                             &ns->eth.tx_bytes);
2471         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2472                             I40E_GLPRT_UPTCL(hw->port),
2473                             pf->offset_loaded, &os->eth.tx_unicast,
2474                             &ns->eth.tx_unicast);
2475         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2476                             I40E_GLPRT_MPTCL(hw->port),
2477                             pf->offset_loaded, &os->eth.tx_multicast,
2478                             &ns->eth.tx_multicast);
2479         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2480                             I40E_GLPRT_BPTCL(hw->port),
2481                             pf->offset_loaded, &os->eth.tx_broadcast,
2482                             &ns->eth.tx_broadcast);
2483         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2484                 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2485
2486         /* exclude internal tx bytes */
2487         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
2488                 ns->eth.tx_bytes = 0;
2489         else
2490                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
2491
2492         /* GLPRT_TEPC not supported */
2493
2494         /* additional port specific stats */
2495         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2496                             pf->offset_loaded, &os->tx_dropped_link_down,
2497                             &ns->tx_dropped_link_down);
2498         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2499                             pf->offset_loaded, &os->crc_errors,
2500                             &ns->crc_errors);
2501         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2502                             pf->offset_loaded, &os->illegal_bytes,
2503                             &ns->illegal_bytes);
2504         /* GLPRT_ERRBC not supported */
2505         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2506                             pf->offset_loaded, &os->mac_local_faults,
2507                             &ns->mac_local_faults);
2508         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2509                             pf->offset_loaded, &os->mac_remote_faults,
2510                             &ns->mac_remote_faults);
2511         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2512                             pf->offset_loaded, &os->rx_length_errors,
2513                             &ns->rx_length_errors);
2514         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2515                             pf->offset_loaded, &os->link_xon_rx,
2516                             &ns->link_xon_rx);
2517         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2518                             pf->offset_loaded, &os->link_xoff_rx,
2519                             &ns->link_xoff_rx);
2520         for (i = 0; i < 8; i++) {
2521                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2522                                     pf->offset_loaded,
2523                                     &os->priority_xon_rx[i],
2524                                     &ns->priority_xon_rx[i]);
2525                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2526                                     pf->offset_loaded,
2527                                     &os->priority_xoff_rx[i],
2528                                     &ns->priority_xoff_rx[i]);
2529         }
2530         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2531                             pf->offset_loaded, &os->link_xon_tx,
2532                             &ns->link_xon_tx);
2533         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2534                             pf->offset_loaded, &os->link_xoff_tx,
2535                             &ns->link_xoff_tx);
2536         for (i = 0; i < 8; i++) {
2537                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2538                                     pf->offset_loaded,
2539                                     &os->priority_xon_tx[i],
2540                                     &ns->priority_xon_tx[i]);
2541                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2542                                     pf->offset_loaded,
2543                                     &os->priority_xoff_tx[i],
2544                                     &ns->priority_xoff_tx[i]);
2545                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2546                                     pf->offset_loaded,
2547                                     &os->priority_xon_2_xoff[i],
2548                                     &ns->priority_xon_2_xoff[i]);
2549         }
2550         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2551                             I40E_GLPRT_PRC64L(hw->port),
2552                             pf->offset_loaded, &os->rx_size_64,
2553                             &ns->rx_size_64);
2554         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2555                             I40E_GLPRT_PRC127L(hw->port),
2556                             pf->offset_loaded, &os->rx_size_127,
2557                             &ns->rx_size_127);
2558         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2559                             I40E_GLPRT_PRC255L(hw->port),
2560                             pf->offset_loaded, &os->rx_size_255,
2561                             &ns->rx_size_255);
2562         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2563                             I40E_GLPRT_PRC511L(hw->port),
2564                             pf->offset_loaded, &os->rx_size_511,
2565                             &ns->rx_size_511);
2566         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2567                             I40E_GLPRT_PRC1023L(hw->port),
2568                             pf->offset_loaded, &os->rx_size_1023,
2569                             &ns->rx_size_1023);
2570         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2571                             I40E_GLPRT_PRC1522L(hw->port),
2572                             pf->offset_loaded, &os->rx_size_1522,
2573                             &ns->rx_size_1522);
2574         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2575                             I40E_GLPRT_PRC9522L(hw->port),
2576                             pf->offset_loaded, &os->rx_size_big,
2577                             &ns->rx_size_big);
2578         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2579                             pf->offset_loaded, &os->rx_undersize,
2580                             &ns->rx_undersize);
2581         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2582                             pf->offset_loaded, &os->rx_fragments,
2583                             &ns->rx_fragments);
2584         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2585                             pf->offset_loaded, &os->rx_oversize,
2586                             &ns->rx_oversize);
2587         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2588                             pf->offset_loaded, &os->rx_jabber,
2589                             &ns->rx_jabber);
2590         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2591                             I40E_GLPRT_PTC64L(hw->port),
2592                             pf->offset_loaded, &os->tx_size_64,
2593                             &ns->tx_size_64);
2594         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2595                             I40E_GLPRT_PTC127L(hw->port),
2596                             pf->offset_loaded, &os->tx_size_127,
2597                             &ns->tx_size_127);
2598         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2599                             I40E_GLPRT_PTC255L(hw->port),
2600                             pf->offset_loaded, &os->tx_size_255,
2601                             &ns->tx_size_255);
2602         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2603                             I40E_GLPRT_PTC511L(hw->port),
2604                             pf->offset_loaded, &os->tx_size_511,
2605                             &ns->tx_size_511);
2606         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2607                             I40E_GLPRT_PTC1023L(hw->port),
2608                             pf->offset_loaded, &os->tx_size_1023,
2609                             &ns->tx_size_1023);
2610         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2611                             I40E_GLPRT_PTC1522L(hw->port),
2612                             pf->offset_loaded, &os->tx_size_1522,
2613                             &ns->tx_size_1522);
2614         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2615                             I40E_GLPRT_PTC9522L(hw->port),
2616                             pf->offset_loaded, &os->tx_size_big,
2617                             &ns->tx_size_big);
2618         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2619                            pf->offset_loaded,
2620                            &os->fd_sb_match, &ns->fd_sb_match);
2621         /* GLPRT_MSPDC not supported */
2622         /* GLPRT_XEC not supported */
2623
2624         pf->offset_loaded = true;
2625
2626         if (pf->main_vsi)
2627                 i40e_update_vsi_stats(pf->main_vsi);
2628 }
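/* A note on the helpers used throughout i40e_read_stats_registers(): the
 * hardware counters are not clear-on-read, so i40e_stat_update_48() and
 * i40e_stat_update_32() compute deltas against the snapshot kept in the
 * "offset" argument once offset_loaded is true.  A rough sketch of the
 * 48-bit variant (illustrative only, not the exact helper defined earlier
 * in this file; rd32() stands for a 32-bit register read):
 *
 *     new = ((uint64_t)(rd32(hw, hireg) & 0xFFFF) << 32) | rd32(hw, loreg);
 *     if (!offset_loaded)
 *             *offset = new;
 *     *stat = (new >= *offset) ? new - *offset
 *                              : new + (1ULL << 48) - *offset;
 */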
2629
2630 /* Get all statistics of a port */
2631 static void
2632 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2633 {
2634         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2635         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2636         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2637         unsigned i;
2638
2639         /* Read the hardware registers; this refreshes the values in pf->stats */
2640         i40e_read_stats_registers(pf, hw);
2641
2642         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
2643                         pf->main_vsi->eth_stats.rx_multicast +
2644                         pf->main_vsi->eth_stats.rx_broadcast -
2645                         pf->main_vsi->eth_stats.rx_discards;
2646         stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
2647                         pf->main_vsi->eth_stats.tx_multicast +
2648                         pf->main_vsi->eth_stats.tx_broadcast;
2649         stats->ibytes   = ns->eth.rx_bytes;
2650         stats->obytes   = ns->eth.tx_bytes;
2651         stats->oerrors  = ns->eth.tx_errors +
2652                         pf->main_vsi->eth_stats.tx_errors;
2653
2654         /* Rx Errors */
2655         stats->imissed  = ns->eth.rx_discards +
2656                         pf->main_vsi->eth_stats.rx_discards;
2657         stats->ierrors  = ns->crc_errors +
2658                         ns->rx_length_errors + ns->rx_undersize +
2659                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
2660
2661         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2662         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
2663         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
2664         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
2665         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
2666         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
2667         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2668                     ns->eth.rx_unknown_protocol);
2669         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
2670         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
2671         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
2672         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
2673         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
2674         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
2675
2676         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
2677                     ns->tx_dropped_link_down);
2678         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
2679         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
2680                     ns->illegal_bytes);
2681         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
2682         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
2683                     ns->mac_local_faults);
2684         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
2685                     ns->mac_remote_faults);
2686         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
2687                     ns->rx_length_errors);
2688         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
2689         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
2690         for (i = 0; i < 8; i++) {
2691                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
2692                                 i, ns->priority_xon_rx[i]);
2693                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
2694                                 i, ns->priority_xoff_rx[i]);
2695         }
2696         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
2697         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
2698         for (i = 0; i < 8; i++) {
2699                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
2700                                 i, ns->priority_xon_tx[i]);
2701                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
2702                                 i, ns->priority_xoff_tx[i]);
2703                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
2704                                 i, ns->priority_xon_2_xoff[i]);
2705         }
2706         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
2707         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
2708         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
2709         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
2710         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
2711         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
2712         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
2713         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
2714         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
2715         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
2716         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
2717         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
2718         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
2719         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
2720         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
2721         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
2722         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
2723         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
2724         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
2725                         ns->mac_short_packet_dropped);
2726         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
2727                     ns->checksum_error);
2728         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
2729         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
2730 }
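/* These counters surface to applications through the generic ethdev stats
 * API; a minimal, illustrative retrieval looks like:
 *
 *     struct rte_eth_stats st;
 *     if (rte_eth_stats_get(port_id, &st) == 0)
 *             printf("rx: %"PRIu64" pkts / %"PRIu64" bytes\n",
 *                    st.ipackets, st.ibytes);
 */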
2731
2732 /* Reset the statistics */
2733 static void
2734 i40e_dev_stats_reset(struct rte_eth_dev *dev)
2735 {
2736         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2737         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2738
2739         /* Mark PF and VSI stats to update the offset, aka "reset" */
2740         pf->offset_loaded = false;
2741         if (pf->main_vsi)
2742                 pf->main_vsi->offset_loaded = false;
2743
2744         /* Read the stats; the current register values are saved as the new offsets */
2745         i40e_read_stats_registers(pf, hw);
2746 }
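/* Note that no hardware register is actually cleared above: reloading the
 * offsets is enough, since every later read reports deltas against them.
 */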
2747
2748 static uint32_t
2749 i40e_xstats_calc_num(void)
2750 {
2751         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
2752                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
2753                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
2754 }
2755
2756 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
2757                                      struct rte_eth_xstat_name *xstats_names,
2758                                      __rte_unused unsigned limit)
2759 {
2760         unsigned count = 0;
2761         unsigned i, prio;
2762
2763         if (xstats_names == NULL)
2764                 return i40e_xstats_calc_num();
2765
2766         /* Note: limit checked in rte_eth_xstats_get_names() */
2767
2768         /* Get stats from i40e_eth_stats struct */
2769         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2770                 snprintf(xstats_names[count].name,
2771                          sizeof(xstats_names[count].name),
2772                          "%s", rte_i40e_stats_strings[i].name);
2773                 count++;
2774         }
2775
2776         /* Get individual stats from i40e_hw_port struct */
2777         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2778                 snprintf(xstats_names[count].name,
2779                         sizeof(xstats_names[count].name),
2780                          "%s", rte_i40e_hw_port_strings[i].name);
2781                 count++;
2782         }
2783
2784         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2785                 for (prio = 0; prio < 8; prio++) {
2786                         snprintf(xstats_names[count].name,
2787                                  sizeof(xstats_names[count].name),
2788                                  "rx_priority%u_%s", prio,
2789                                  rte_i40e_rxq_prio_strings[i].name);
2790                         count++;
2791                 }
2792         }
2793
2794         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2795                 for (prio = 0; prio < 8; prio++) {
2796                         snprintf(xstats_names[count].name,
2797                                  sizeof(xstats_names[count].name),
2798                                  "tx_priority%u_%s", prio,
2799                                  rte_i40e_txq_prio_strings[i].name);
2800                         count++;
2801                 }
2802         }
2803         return count;
2804 }
2805
2806 static int
2807 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2808                     unsigned n)
2809 {
2810         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2811         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2812         unsigned i, count, prio;
2813         struct i40e_hw_port_stats *hw_stats = &pf->stats;
2814
2815         count = i40e_xstats_calc_num();
2816         if (n < count)
2817                 return count;
2818
2819         i40e_read_stats_registers(pf, hw);
2820
2821         if (xstats == NULL)
2822                 return 0;
2823
2824         count = 0;
2825
2826         /* Get stats from i40e_eth_stats struct */
2827         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2828                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
2829                         rte_i40e_stats_strings[i].offset);
2830                 xstats[count].id = count;
2831                 count++;
2832         }
2833
2834         /* Get individual stats from i40e_hw_port struct */
2835         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2836                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2837                         rte_i40e_hw_port_strings[i].offset);
2838                 xstats[count].id = count;
2839                 count++;
2840         }
2841
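        /* The per-priority counters are laid out in i40e_hw_port_stats as
         * arrays of 8 consecutive uint64_t values (one per traffic class),
         * so each entry lives at the base offset plus prio * 8 bytes.
         */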
2842         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2843                 for (prio = 0; prio < 8; prio++) {
2844                         xstats[count].value =
2845                                 *(uint64_t *)(((char *)hw_stats) +
2846                                 rte_i40e_rxq_prio_strings[i].offset +
2847                                 (sizeof(uint64_t) * prio));
2848                         xstats[count].id = count;
2849                         count++;
2850                 }
2851         }
2852
2853         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2854                 for (prio = 0; prio < 8; prio++) {
2855                         xstats[count].value =
2856                                 *(uint64_t *)(((char *)hw_stats) +
2857                                 rte_i40e_txq_prio_strings[i].offset +
2858                                 (sizeof(uint64_t) * prio));
2859                         xstats[count].id = count;
2860                         count++;
2861                 }
2862         }
2863
2864         return count;
2865 }
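/* A typical application-side retrieval sequence (illustrative):
 *
 *     int n = rte_eth_xstats_get(port_id, NULL, 0);    // query the count
 *     struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *     if (xs && rte_eth_xstats_get(port_id, xs, n) == n)
 *             ...  // xs[i].id matches the i-th name from xstats_get_names
 */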
2866
2867 static int
2868 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
2869                                  __rte_unused uint16_t queue_id,
2870                                  __rte_unused uint8_t stat_idx,
2871                                  __rte_unused uint8_t is_rx)
2872 {
2873         PMD_INIT_FUNC_TRACE();
2874
2875         return -ENOSYS;
2876 }
2877
2878 static int
2879 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2880 {
2881         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2882         u32 full_ver;
2883         u8 ver, patch;
2884         u16 build;
2885         int ret;
2886
2887         full_ver = hw->nvm.oem_ver;
2888         ver = (u8)(full_ver >> 24);
2889         build = (u16)((full_ver >> 8) & 0xffff);
2890         patch = (u8)(full_ver & 0xff);
2891
2892         ret = snprintf(fw_version, fw_size,
2893                  "%d.%d%d 0x%08x %d.%d.%d",
2894                  ((hw->nvm.version >> 12) & 0xf),
2895                  ((hw->nvm.version >> 4) & 0xff),
2896                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
2897                  ver, build, patch);
2898
2899         ret += 1; /* add the size of '\0' */
2900         if (fw_size < (u32)ret)
2901                 return ret;
2902         else
2903                 return 0;
2904 }
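/* The string produced above looks like "6.01 0x80003483 1.1.1" (values
 * illustrative): NVM major.minor+patch, the eetrack id, then the OEM
 * version decoded from hw->nvm.oem_ver as ver.build.patch.
 */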
2905
2906 static void
2907 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2908 {
2909         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2910         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2911         struct i40e_vsi *vsi = pf->main_vsi;
2912         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2913
2914         dev_info->pci_dev = pci_dev;
2915         dev_info->max_rx_queues = vsi->nb_qps;
2916         dev_info->max_tx_queues = vsi->nb_qps;
2917         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
2918         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
2919         dev_info->max_mac_addrs = vsi->max_macaddrs;
2920         dev_info->max_vfs = pci_dev->max_vfs;
2921         dev_info->rx_offload_capa =
2922                 DEV_RX_OFFLOAD_VLAN_STRIP |
2923                 DEV_RX_OFFLOAD_QINQ_STRIP |
2924                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2925                 DEV_RX_OFFLOAD_UDP_CKSUM |
2926                 DEV_RX_OFFLOAD_TCP_CKSUM;
2927         dev_info->tx_offload_capa =
2928                 DEV_TX_OFFLOAD_VLAN_INSERT |
2929                 DEV_TX_OFFLOAD_QINQ_INSERT |
2930                 DEV_TX_OFFLOAD_IPV4_CKSUM |
2931                 DEV_TX_OFFLOAD_UDP_CKSUM |
2932                 DEV_TX_OFFLOAD_TCP_CKSUM |
2933                 DEV_TX_OFFLOAD_SCTP_CKSUM |
2934                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2935                 DEV_TX_OFFLOAD_TCP_TSO |
2936                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
2937                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
2938                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
2939                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
2940         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
2941                                                 sizeof(uint32_t);
2942         dev_info->reta_size = pf->hash_lut_size;
2943         dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
2944
2945         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2946                 .rx_thresh = {
2947                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
2948                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
2949                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
2950                 },
2951                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
2952                 .rx_drop_en = 0,
2953         };
2954
2955         dev_info->default_txconf = (struct rte_eth_txconf) {
2956                 .tx_thresh = {
2957                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
2958                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
2959                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
2960                 },
2961                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
2962                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
2963                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2964                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2965         };
2966
2967         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2968                 .nb_max = I40E_MAX_RING_DESC,
2969                 .nb_min = I40E_MIN_RING_DESC,
2970                 .nb_align = I40E_ALIGN_RING_DESC,
2971         };
2972
2973         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2974                 .nb_max = I40E_MAX_RING_DESC,
2975                 .nb_min = I40E_MIN_RING_DESC,
2976                 .nb_align = I40E_ALIGN_RING_DESC,
2977                 .nb_seg_max = I40E_TX_MAX_SEG,
2978                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
2979         };
2980
2981         if (pf->flags & I40E_FLAG_VMDQ) {
2982                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
2983                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
2984                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
2985                                                 pf->max_nb_vmdq_vsi;
2986                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
2987                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
2988                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
2989         }
2990
2991         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
2992                 /* For XL710 */
2993                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
2994         else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
2995                 /* For XXV710 */
2996                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
2997         else
2998                 /* For X710 */
2999                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3000 }
3001
3002 static int
3003 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3004 {
3005         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3006         struct i40e_vsi *vsi = pf->main_vsi;
3007         PMD_INIT_FUNC_TRACE();
3008
3009         if (on)
3010                 return i40e_vsi_add_vlan(vsi, vlan_id);
3011         else
3012                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3013 }
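/* This is the .vlan_filter_set callback, reached via e.g. (illustrative)
 *     rte_eth_dev_vlan_filter(port_id, vlan_id, 1);
 * and it always operates on the main VSI.
 */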
3014
3015 static int
3016 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3017                                 enum rte_vlan_type vlan_type,
3018                                 uint16_t tpid, int qinq)
3019 {
3020         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3021         uint64_t reg_r = 0;
3022         uint64_t reg_w = 0;
3023         uint16_t reg_id = 3;
3024         int ret;
3025
3026         if (qinq) {
3027                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3028                         reg_id = 2;
3029         }
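        /* L2TAGCTRL entry 3 holds the single/inner tag ethertype; in QinQ
         * mode the outer tag is programmed through entry 2 instead (indices
         * as used by this driver).
         */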
3030
3031         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3032                                           &reg_r, NULL);
3033         if (ret != I40E_SUCCESS) {
3034                 PMD_DRV_LOG(ERR,
3035                            "Failed to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3036                            reg_id);
3037                 return -EIO;
3038         }
3039         PMD_DRV_LOG(DEBUG,
3040                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3041                     reg_id, reg_r);
3042
3043         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3044         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3045         if (reg_r == reg_w) {
3046                 PMD_DRV_LOG(DEBUG, "No need to write");
3047                 return 0;
3048         }
3049
3050         ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3051                                            reg_w, NULL);
3052         if (ret != I40E_SUCCESS) {
3053                 PMD_DRV_LOG(ERR,
3054                             "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3055                             reg_id);
3056                             "Failed to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3057         }
3058         PMD_DRV_LOG(DEBUG,
3059                     "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
3060                     reg_w, reg_id);
3061
3062         return 0;
3063 }
3064
3065 static int
3066 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3067                    enum rte_vlan_type vlan_type,
3068                    uint16_t tpid)
3069 {
3070         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3071         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
3072         int ret = 0;
3073
3074         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3075              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3076             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3077                 PMD_DRV_LOG(ERR,
3078                             "Unsupported vlan type.");
3079                 return -EINVAL;
3080         }
3081         /* Support for 802.1ad frames was added in NVM API 1.7 */
3082         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3083                 if (qinq) {
3084                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3085                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3086                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3087                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3088                 } else {
3089                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3090                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3091                 }
3092                 ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
3093                 if (ret != I40E_SUCCESS) {
3094                         PMD_DRV_LOG(ERR,
3095                                     "Set switch config failed aq_err: %d",
3096                                     hw->aq.asq_last_status);
3097                         ret = -EIO;
3098                 }
3099         } else
3100                 /* If NVM API < 1.7, keep the register setting */
3101                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3102                                                       tpid, qinq);
3103
3104         return ret;
3105 }
3106
3107 static void
3108 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3109 {
3110         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3111         struct i40e_vsi *vsi = pf->main_vsi;
3112
3113         if (mask & ETH_VLAN_FILTER_MASK) {
3114                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3115                         i40e_vsi_config_vlan_filter(vsi, TRUE);
3116                 else
3117                         i40e_vsi_config_vlan_filter(vsi, FALSE);
3118         }
3119
3120         if (mask & ETH_VLAN_STRIP_MASK) {
3121                 /* Enable or disable VLAN stripping */
3122                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
3123                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
3124                 else
3125                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
3126         }
3127
3128         if (mask & ETH_VLAN_EXTEND_MASK) {
3129                 if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
3130                         i40e_vsi_config_double_vlan(vsi, TRUE);
3131                         /* Set global registers with default ethertype. */
3132                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3133                                            ETHER_TYPE_VLAN);
3134                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3135                                            ETHER_TYPE_VLAN);
3136                 }
3137                 else
3138                         i40e_vsi_config_double_vlan(vsi, FALSE);
3139         }
3140 }
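/* This is the .vlan_offload_set callback; the ethdev layer first updates
 * dev_conf.rxmode from the requested offload mask and then invokes it,
 * e.g. (illustrative):
 *     rte_eth_dev_set_vlan_offload(port_id,
 *             rte_eth_dev_get_vlan_offload(port_id) | ETH_VLAN_STRIP_OFFLOAD);
 */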
3141
3142 static void
3143 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3144                           __rte_unused uint16_t queue,
3145                           __rte_unused int on)
3146 {
3147         PMD_INIT_FUNC_TRACE();
3148 }
3149
3150 static int
3151 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3152 {
3153         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3154         struct i40e_vsi *vsi = pf->main_vsi;
3155         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3156         struct i40e_vsi_vlan_pvid_info info;
3157
3158         memset(&info, 0, sizeof(info));
3159         info.on = on;
3160         if (info.on)
3161                 info.config.pvid = pvid;
3162         else {
3163                 info.config.reject.tagged =
3164                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
3165                 info.config.reject.untagged =
3166                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
3167         }
3168
3169         return i40e_vsi_vlan_pvid_set(vsi, &info);
3170 }
3171
3172 static int
3173 i40e_dev_led_on(struct rte_eth_dev *dev)
3174 {
3175         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3176         uint32_t mode = i40e_led_get(hw);
3177
3178         if (mode == 0)
3179                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
3180
3181         return 0;
3182 }
3183
3184 static int
3185 i40e_dev_led_off(struct rte_eth_dev *dev)
3186 {
3187         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3188         uint32_t mode = i40e_led_get(hw);
3189
3190         if (mode != 0)
3191                 i40e_led_set(hw, 0, false);
3192
3193         return 0;
3194 }
3195
3196 static int
3197 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3198 {
3199         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3200         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3201
3202         fc_conf->pause_time = pf->fc_conf.pause_time;
3203         fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3204         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3205
3206         /* Return the current mode according to the actual setting */
3207         switch (hw->fc.current_mode) {
3208         case I40E_FC_FULL:
3209                 fc_conf->mode = RTE_FC_FULL;
3210                 break;
3211         case I40E_FC_TX_PAUSE:
3212                 fc_conf->mode = RTE_FC_TX_PAUSE;
3213                 break;
3214         case I40E_FC_RX_PAUSE:
3215                 fc_conf->mode = RTE_FC_RX_PAUSE;
3216                 break;
3217         case I40E_FC_NONE:
3218         default:
3219                 fc_conf->mode = RTE_FC_NONE;
3220         }
3221
3222         return 0;
3223 }
3224
3225 static int
3226 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3227 {
3228         uint32_t mflcn_reg, fctrl_reg, reg;
3229         uint32_t max_high_water;
3230         uint8_t i, aq_failure;
3231         int err;
3232         struct i40e_hw *hw;
3233         struct i40e_pf *pf;
3234         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3235                 [RTE_FC_NONE] = I40E_FC_NONE,
3236                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3237                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3238                 [RTE_FC_FULL] = I40E_FC_FULL
3239         };
3240
3241         /* The high_water field in rte_eth_fc_conf is in units of kilobytes */
3242
3243         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3244         if ((fc_conf->high_water > max_high_water) ||
3245                         (fc_conf->high_water < fc_conf->low_water)) {
3246                 PMD_INIT_LOG(ERR,
3247                         "Invalid high/low water setup value in KB, High_water must be <= %d.",
3248                         max_high_water);
3249                 return -EINVAL;
3250         }
3251
3252         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3253         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3254         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3255
3256         pf->fc_conf.pause_time = fc_conf->pause_time;
3257         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3258         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3259
3260         PMD_INIT_FUNC_TRACE();
3261
3262         /* All the link flow control related enable/disable register
3263          * configuration is handled by the F/W
3264          */
3265         err = i40e_set_fc(hw, &aq_failure, true);
3266         if (err < 0)
3267                 return -ENOSYS;
3268
3269         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3270                 /* Configure flow control refresh threshold,
3271                  * the value for stat_tx_pause_refresh_timer[8]
3272                  * is used for global pause operation.
3273                  */
3274
3275                 I40E_WRITE_REG(hw,
3276                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3277                                pf->fc_conf.pause_time);
3278
3279                 /* configure the timer value included in transmitted pause
3280                  * frame,
3281                  * the value for stat_tx_pause_quanta[8] is used for global
3282                  * pause operation
3283                  */
3284                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3285                                pf->fc_conf.pause_time);
3286
3287                 fctrl_reg = I40E_READ_REG(hw,
3288                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3289
3290                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3291                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3292                 else
3293                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3294
3295                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3296                                fctrl_reg);
3297         } else {
3298                 /* Configure pause time (2 TCs per register) */
3299                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3300                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3301                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3302
3303                 /* Configure flow control refresh threshold value */
3304                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3305                                pf->fc_conf.pause_time / 2);
3306
3307                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3308
3309                 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
3310                  * depending on configuration
3311                  */
3312                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
3313                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3314                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3315                 } else {
3316                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3317                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3318                 }
3319
3320                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3321         }
3322
3323         /* Configure the water marks, based both on packets and on bytes */
3324         I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
3325                        (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3326                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3327         I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
3328                        (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3329                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3330         I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
3331                        pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3332                        << I40E_KILOSHIFT);
3333         I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
3334                        pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3335                        << I40E_KILOSHIFT);
3336
3337         I40E_WRITE_FLUSH(hw);
3338
3339         return 0;
3340 }
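/* Sketch of an application enabling full link flow control through the
 * public API (all values illustrative; high/low water are in KB as noted
 * above):
 *
 *     struct rte_eth_fc_conf fc = {
 *             .mode = RTE_FC_FULL,
 *             .high_water = 2,
 *             .low_water = 1,
 *             .pause_time = 0xFFFF,
 *     };
 *     rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */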
3341
3342 static int
3343 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3344                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3345 {
3346         PMD_INIT_FUNC_TRACE();
3347
3348         return -ENOSYS;
3349 }
3350
3351 /* Add a MAC address, and update filters */
3352 static int
3353 i40e_macaddr_add(struct rte_eth_dev *dev,
3354                  struct ether_addr *mac_addr,
3355                  __rte_unused uint32_t index,
3356                  uint32_t pool)
3357 {
3358         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3359         struct i40e_mac_filter_info mac_filter;
3360         struct i40e_vsi *vsi;
3361         int ret;
3362
3363         /* If VMDQ not enabled or configured, return */
3364         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
3365                           !pf->nb_cfg_vmdq_vsi)) {
3366                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
3367                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
3368                         pool);
3369                 return -ENOTSUP;
3370         }
3371
3372         if (pool > pf->nb_cfg_vmdq_vsi) {
3373                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3374                                 pool, pf->nb_cfg_vmdq_vsi);
3375                 return -EINVAL;
3376         }
3377
3378         (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3379         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3380                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3381         else
3382                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3383
3384         if (pool == 0)
3385                 vsi = pf->main_vsi;
3386         else
3387                 vsi = pf->vmdq[pool - 1].vsi;
3388
3389         ret = i40e_vsi_add_mac(vsi, &mac_filter);
3390         if (ret != I40E_SUCCESS) {
3391                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3392                 return -ENODEV;
3393         }
3394         return 0;
3395 }
3396
3397 /* Remove a MAC address, and update filters */
3398 static void
3399 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3400 {
3401         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3402         struct i40e_vsi *vsi;
3403         struct rte_eth_dev_data *data = dev->data;
3404         struct ether_addr *macaddr;
3405         int ret;
3406         uint32_t i;
3407         uint64_t pool_sel;
3408
3409         macaddr = &(data->mac_addrs[index]);
3410
3411         pool_sel = dev->data->mac_pool_sel[index];
3412
3413         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3414                 if (pool_sel & (1ULL << i)) {
3415                         if (i == 0)
3416                                 vsi = pf->main_vsi;
3417                         else {
3418                                 /* No VMDQ pool enabled or configured */
3419                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3420                                         (i > pf->nb_cfg_vmdq_vsi)) {
3421                                         PMD_DRV_LOG(ERR,
3422                                                 "No VMDQ pool enabled/configured");
3423                                         return;
3424                                 }
3425                                 vsi = pf->vmdq[i - 1].vsi;
3426                         }
3427                         ret = i40e_vsi_delete_mac(vsi, macaddr);
3428
3429                         if (ret) {
3430                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3431                                 return;
3432                         }
3433                 }
3434         }
3435 }
3436
3437 /* Set perfect match or hash match of MAC and VLAN for a VF */
3438 static int
3439 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3440                  struct rte_eth_mac_filter *filter,
3441                  bool add)
3442 {
3443         struct i40e_hw *hw;
3444         struct i40e_mac_filter_info mac_filter;
3445         struct ether_addr old_mac;
3446         struct ether_addr *new_mac;
3447         struct i40e_pf_vf *vf = NULL;
3448         uint16_t vf_id;
3449         int ret;
3450
3451         if (pf == NULL) {
3452                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3453                 return -EINVAL;
3454         }
3455         hw = I40E_PF_TO_HW(pf);
3456
3457         if (filter == NULL) {
3458                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3459                 return -EINVAL;
3460         }
3461
3462         new_mac = &filter->mac_addr;
3463
3464         if (is_zero_ether_addr(new_mac)) {
3465                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3466                 return -EINVAL;
3467         }
3468
3469         vf_id = filter->dst_id;
3470
3471         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
3472                 PMD_DRV_LOG(ERR, "Invalid argument.");
3473                 return -EINVAL;
3474         }
3475         vf = &pf->vfs[vf_id];
3476
3477         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3478                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3479                 return -EINVAL;
3480         }
3481
3482         if (add) {
3483                 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3484                 (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3485                                 ETHER_ADDR_LEN);
3486                 (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3487                                  ETHER_ADDR_LEN);
3488
3489                 mac_filter.filter_type = filter->filter_type;
3490                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3491                 if (ret != I40E_SUCCESS) {
3492                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3493                         return -1;
3494                 }
3495                 ether_addr_copy(new_mac, &pf->dev_addr);
3496         } else {
3497                 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3498                                 ETHER_ADDR_LEN);
3499                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3500                 if (ret != I40E_SUCCESS) {
3501                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3502                         return -1;
3503                 }
3504
3505                 /* Clear device address as it has been removed */
3506                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3507                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3508         }
3509
3510         return 0;
3511 }
3512
3513 /* MAC filter handle */
3514 static int
3515 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3516                 void *arg)
3517 {
3518         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3519         struct rte_eth_mac_filter *filter;
3520         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3521         int ret = I40E_NOT_SUPPORTED;
3522
3523         filter = (struct rte_eth_mac_filter *)(arg);
3524
3525         switch (filter_op) {
3526         case RTE_ETH_FILTER_NOP:
3527                 ret = I40E_SUCCESS;
3528                 break;
3529         case RTE_ETH_FILTER_ADD:
3530                 i40e_pf_disable_irq0(hw);
3531                 if (filter->is_vf)
3532                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
3533                 i40e_pf_enable_irq0(hw);
3534                 break;
3535         case RTE_ETH_FILTER_DELETE:
3536                 i40e_pf_disable_irq0(hw);
3537                 if (filter->is_vf)
3538                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
3539                 i40e_pf_enable_irq0(hw);
3540                 break;
3541         default:
3542                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3543                 ret = I40E_ERR_PARAM;
3544                 break;
3545         }
3546
3547         return ret;
3548 }
3549
3550 static int
3551 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3552 {
3553         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3554         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3555         int ret;
3556
3557         if (!lut)
3558                 return -EINVAL;
3559
3560         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3561                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
3562                                           lut, lut_size);
3563                 if (ret) {
3564                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3565                         return ret;
3566                 }
3567         } else {
3568                 uint32_t *lut_dw = (uint32_t *)lut;
3569                 uint16_t i, lut_size_dw = lut_size / 4;
3570
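                /* Without the AQ command, fall back to the PFQF_HLUT
                 * registers; each 32-bit register packs four one-byte LUT
                 * entries, hence the lut_size / 4 dword loop.
                 */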
3571                 for (i = 0; i < lut_size_dw; i++)
3572                         lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
3573         }
3574
3575         return 0;
3576 }
3577
3578 static int
3579 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3580 {
3581         struct i40e_pf *pf;
3582         struct i40e_hw *hw;
3583         int ret;
3584
3585         if (!vsi || !lut)
3586                 return -EINVAL;
3587
3588         pf = I40E_VSI_TO_PF(vsi);
3589         hw = I40E_VSI_TO_HW(vsi);
3590
3591         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3592                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
3593                                           lut, lut_size);
3594                 if (ret) {
3595                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3596                         return ret;
3597                 }
3598         } else {
3599                 uint32_t *lut_dw = (uint32_t *)lut;
3600                 uint16_t i, lut_size_dw = lut_size / 4;
3601
3602                 for (i = 0; i < lut_size_dw; i++)
3603                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
3604                 I40E_WRITE_FLUSH(hw);
3605         }
3606
3607         return 0;
3608 }
3609
3610 static int
3611 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
3612                          struct rte_eth_rss_reta_entry64 *reta_conf,
3613                          uint16_t reta_size)
3614 {
3615         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3616         uint16_t i, lut_size = pf->hash_lut_size;
3617         uint16_t idx, shift;
3618         uint8_t *lut;
3619         int ret;
3620
3621         if (reta_size != lut_size ||
3622                 reta_size > ETH_RSS_RETA_SIZE_512) {
3623                 PMD_DRV_LOG(ERR,
3624                         "The size of the configured hash lookup table (%d) doesn't match the number the hardware can support (%d)",
3625                         reta_size, lut_size);
3626                 return -EINVAL;
3627         }
3628
3629         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3630         if (!lut) {
3631                 PMD_DRV_LOG(ERR, "No memory can be allocated");
3632                 return -ENOMEM;
3633         }
3634         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3635         if (ret)
3636                 goto out;
3637         for (i = 0; i < reta_size; i++) {
3638                 idx = i / RTE_RETA_GROUP_SIZE;
3639                 shift = i % RTE_RETA_GROUP_SIZE;
3640                 if (reta_conf[idx].mask & (1ULL << shift))
3641                         lut[i] = reta_conf[idx].reta[shift];
3642         }
3643         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
3644
3645 out:
3646         rte_free(lut);
3647
3648         return ret;
3649 }
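/* Illustrative application-side update spreading all RETA entries across
 * four queues (assumes reta_size matches dev_info.reta_size, e.g. 512):
 *
 *     struct rte_eth_rss_reta_entry64 conf[512 / RTE_RETA_GROUP_SIZE] = { 0 };
 *     for (i = 0; i < 512; i++) {
 *             conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                     i % 4;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, conf, 512);
 */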
3650
3651 static int
3652 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
3653                         struct rte_eth_rss_reta_entry64 *reta_conf,
3654                         uint16_t reta_size)
3655 {
3656         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3657         uint16_t i, lut_size = pf->hash_lut_size;
3658         uint16_t idx, shift;
3659         uint8_t *lut;
3660         int ret;
3661
3662         if (reta_size != lut_size ||
3663                 reta_size > ETH_RSS_RETA_SIZE_512) {
3664                 PMD_DRV_LOG(ERR,
3665                         "The size of the configured hash lookup table (%d) doesn't match the number the hardware can support (%d)",
3666                         reta_size, lut_size);
3667                 return -EINVAL;
3668         }
3669
3670         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3671         if (!lut) {
3672                 PMD_DRV_LOG(ERR, "No memory can be allocated");
3673                 return -ENOMEM;
3674         }
3675
3676         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3677         if (ret)
3678                 goto out;
3679         for (i = 0; i < reta_size; i++) {
3680                 idx = i / RTE_RETA_GROUP_SIZE;
3681                 shift = i % RTE_RETA_GROUP_SIZE;
3682                 if (reta_conf[idx].mask & (1ULL << shift))
3683                         reta_conf[idx].reta[shift] = lut[i];
3684         }
3685
3686 out:
3687         rte_free(lut);
3688
3689         return ret;
3690 }
3691
3692 /**
3693  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
3694  * @hw:   pointer to the HW structure
3695  * @mem:  pointer to mem struct to fill out
3696  * @size: size of memory requested
3697  * @alignment: what to align the allocation to
3698  **/
3699 enum i40e_status_code
3700 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3701                         struct i40e_dma_mem *mem,
3702                         u64 size,
3703                         u32 alignment)
3704 {
3705         const struct rte_memzone *mz = NULL;
3706         char z_name[RTE_MEMZONE_NAMESIZE];
3707
3708         if (!mem)
3709                 return I40E_ERR_PARAM;
3710
3711         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
3712         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
3713                                          alignment, RTE_PGSIZE_2M);
3714         if (!mz)
3715                 return I40E_ERR_NO_MEMORY;
3716
3717         mem->size = size;
3718         mem->va = mz->addr;
3719         mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
3720         mem->zone = (const void *)mz;
3721         PMD_DRV_LOG(DEBUG,
3722                 "memzone %s allocated with physical address: %"PRIu64,
3723                 mz->name, mem->pa);
3724
3725         return I40E_SUCCESS;
3726 }
3727
3728 /**
3729  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
3730  * @hw:   pointer to the HW structure
3731  * @mem:  ptr to mem struct to free
3732  **/
3733 enum i40e_status_code
3734 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3735                     struct i40e_dma_mem *mem)
3736 {
3737         if (!mem)
3738                 return I40E_ERR_PARAM;
3739
3740         PMD_DRV_LOG(DEBUG,
3741                 "memzone %s to be freed with physical address: %"PRIu64,
3742                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
3743         rte_memzone_free((const struct rte_memzone *)mem->zone);
3744         mem->zone = NULL;
3745         mem->va = NULL;
3746         mem->pa = (u64)0;
3747
3748         return I40E_SUCCESS;
3749 }
3750
3751 /**
3752  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
3753  * @hw:   pointer to the HW structure
3754  * @mem:  pointer to mem struct to fill out
3755  * @size: size of memory requested
3756  **/
3757 enum i40e_status_code
3758 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3759                          struct i40e_virt_mem *mem,
3760                          u32 size)
3761 {
3762         if (!mem)
3763                 return I40E_ERR_PARAM;
3764
3765         mem->size = size;
3766         mem->va = rte_zmalloc("i40e", size, 0);
3767
3768         if (mem->va)
3769                 return I40E_SUCCESS;
3770         else
3771                 return I40E_ERR_NO_MEMORY;
3772 }
3773
3774 /**
3775  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
3776  * @hw:   pointer to the HW structure
3777  * @mem:  pointer to mem struct to free
3778  **/
3779 enum i40e_status_code
3780 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3781                      struct i40e_virt_mem *mem)
3782 {
3783         if (!mem)
3784                 return I40E_ERR_PARAM;
3785
3786         rte_free(mem->va);
3787         mem->va = NULL;
3788
3789         return I40E_SUCCESS;
3790 }
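/*
 * Illustrative usage sketch only, not part of the driver: the virtual
 * memory helpers follow the same alloc/free pairing, backed by
 * rte_zmalloc()/rte_free(). The 256-byte size is an assumption.
 */
static __attribute__((unused)) void
i40e_virt_mem_usage_sketch(struct i40e_hw *hw)
{
        struct i40e_virt_mem mem;

        if (i40e_allocate_virt_mem_d(hw, &mem, 256) != I40E_SUCCESS)
                return;
        /* mem.va points to 256 zeroed bytes */
        i40e_free_virt_mem_d(hw, &mem);
}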
3791
3792 void
3793 i40e_init_spinlock_d(struct i40e_spinlock *sp)
3794 {
3795         rte_spinlock_init(&sp->spinlock);
3796 }
3797
3798 void
3799 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
3800 {
3801         rte_spinlock_lock(&sp->spinlock);
3802 }
3803
3804 void
3805 i40e_release_spinlock_d(struct i40e_spinlock *sp)
3806 {
3807         rte_spinlock_unlock(&sp->spinlock);
3808 }
3809
3810 void
3811 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
3812 {
3813         return;
3814 }
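/*
 * Illustrative usage sketch only, not part of the driver: the spinlock
 * wrappers above guard a critical section exactly like a plain
 * rte_spinlock would; destroy is a no-op here.
 */
static __attribute__((unused)) void
i40e_spinlock_usage_sketch(void)
{
        struct i40e_spinlock sp;

        i40e_init_spinlock_d(&sp);
        i40e_acquire_spinlock_d(&sp);
        /* ... critical section, e.g. an admin queue access ... */
        i40e_release_spinlock_d(&sp);
        i40e_destroy_spinlock_d(&sp);
}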
3815
3816 /**
3817  * Get the hardware capabilities, which will be parsed
3818  * and saved into struct i40e_hw.
3819  */
3820 static int
3821 i40e_get_cap(struct i40e_hw *hw)
3822 {
3823         struct i40e_aqc_list_capabilities_element_resp *buf;
3824         uint16_t len, size = 0;
3825         int ret;
3826
3827         /* Calculate a buffer size large enough to hold the response data temporarily */
3828         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
3829                                                 I40E_MAX_CAP_ELE_NUM;
3830         buf = rte_zmalloc("i40e", len, 0);
3831         if (!buf) {
3832                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3833                 return I40E_ERR_NO_MEMORY;
3834         }
3835
3836         /* Get and parse the capabilities, then save them to hw */
3837         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
3838                         i40e_aqc_opc_list_func_capabilities, NULL);
3839         if (ret != I40E_SUCCESS)
3840                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
3841
3842         /* Free the temporary buffer once it has been used */
3843         rte_free(buf);
3844
3845         return ret;
3846 }
3847
3848 static int
3849 i40e_pf_parameter_init(struct rte_eth_dev *dev)
3850 {
3851         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3852         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3853         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3854         uint16_t qp_count = 0, vsi_count = 0;
3855
3856         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
3857                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
3858                 return -EINVAL;
3859         }
3860         /* Initialize link flow control (LFC) parameters */
3861         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
3862         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
3863         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
3864
3865         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
3866         pf->max_num_vsi = hw->func_caps.num_vsis;
3867         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
3868         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
3869         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3870
3871         /* FDir queue/VSI allocation */
3872         pf->fdir_qp_offset = 0;
3873         if (hw->func_caps.fd) {
3874                 pf->flags |= I40E_FLAG_FDIR;
3875                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
3876         } else {
3877                 pf->fdir_nb_qps = 0;
3878         }
3879         qp_count += pf->fdir_nb_qps;
3880         vsi_count += 1;
3881
3882         /* LAN queue/VSI allocation */
3883         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
3884         if (!hw->func_caps.rss) {
3885                 pf->lan_nb_qps = 1;
3886         } else {
3887                 pf->flags |= I40E_FLAG_RSS;
3888                 if (hw->mac.type == I40E_MAC_X722)
3889                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
3890                 pf->lan_nb_qps = pf->lan_nb_qp_max;
3891         }
3892         qp_count += pf->lan_nb_qps;
3893         vsi_count += 1;
3894
3895         /* VF queue/VSI allocation */
3896         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
3897         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
3898                 pf->flags |= I40E_FLAG_SRIOV;
3899                 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3900                 pf->vf_num = pci_dev->max_vfs;
3901                 PMD_DRV_LOG(DEBUG,
3902                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
3903                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
3904         } else {
3905                 pf->vf_nb_qps = 0;
3906                 pf->vf_num = 0;
3907         }
3908         qp_count += pf->vf_nb_qps * pf->vf_num;
3909         vsi_count += pf->vf_num;
3910
3911         /* VMDq queue/VSI allocation */
3912         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
3913         pf->vmdq_nb_qps = 0;
3914         pf->max_nb_vmdq_vsi = 0;
3915         if (hw->func_caps.vmdq) {
3916                 if (qp_count < hw->func_caps.num_tx_qp &&
3917                         vsi_count < hw->func_caps.num_vsis) {
3918                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
3919                                 qp_count) / pf->vmdq_nb_qp_max;
3920
3921                         /* Limit the maximum number of VMDq VSIs to the
3922                          * maximum ethdev can support
3923                          */
3924                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
3925                                 hw->func_caps.num_vsis - vsi_count);
3926                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
3927                                 ETH_64_POOLS);
3928                         if (pf->max_nb_vmdq_vsi) {
3929                                 pf->flags |= I40E_FLAG_VMDQ;
3930                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
3931                                 PMD_DRV_LOG(DEBUG,
3932                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
3933                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
3934                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
3935                         } else {
3936                                 PMD_DRV_LOG(INFO,
3937                                         "Not enough queues left for VMDq");
3938                         }
3939                 } else {
3940                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
3941                 }
3942         }
3943         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
3944         vsi_count += pf->max_nb_vmdq_vsi;
3945
3946         if (hw->func_caps.dcb)
3947                 pf->flags |= I40E_FLAG_DCB;
3948
3949         if (qp_count > hw->func_caps.num_tx_qp) {
3950                 PMD_DRV_LOG(ERR,
3951                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
3952                         qp_count, hw->func_caps.num_tx_qp);
3953                 return -EINVAL;
3954         }
3955         if (vsi_count > hw->func_caps.num_vsis) {
3956                 PMD_DRV_LOG(ERR,
3957                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
3958                         vsi_count, hw->func_caps.num_vsis);
3959                 return -EINVAL;
3960         }
3961
3962         return 0;
3963 }
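/*
 * Worked layout example (hypothetical numbers, for illustration only):
 * with fdir_nb_qps = 1, lan_nb_qps = 64 and 4 VFs at 4 queues each,
 * the contiguous queue space computed above becomes
 *   fdir_qp_offset = 0              (1 queue)
 *   lan_qp_offset  = 0 + 1   = 1    (64 queues)
 *   vf_qp_offset   = 1 + 64  = 65   (4 * 4 = 16 queues)
 *   vmdq_qp_offset = 65 + 16 = 81
 * so qp_count is 81 before VMDq claims whatever is left, and
 * vsi_count is 1 (fdir) + 1 (lan) + 4 (VFs) = 6.
 */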
3964
3965 static int
3966 i40e_pf_get_switch_config(struct i40e_pf *pf)
3967 {
3968         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3969         struct i40e_aqc_get_switch_config_resp *switch_config;
3970         struct i40e_aqc_switch_config_element_resp *element;
3971         uint16_t start_seid = 0, num_reported;
3972         int ret;
3973
3974         switch_config = (struct i40e_aqc_get_switch_config_resp *)
3975                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
3976         if (!switch_config) {
3977                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3978                 return -ENOMEM;
3979         }
3980
3981         /* Get the switch configurations */
3982         ret = i40e_aq_get_switch_config(hw, switch_config,
3983                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
3984         if (ret != I40E_SUCCESS) {
3985                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
3986                 goto fail;
3987         }
3988         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
3989         if (num_reported != 1) { /* exactly one element is expected */
3990                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
3991                 goto fail;
3992         }
3993
3994         /* Parse the switch configuration elements */
3995         element = &(switch_config->element[0]);
3996         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
3997                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
3998                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
3999         } else
4000                 PMD_DRV_LOG(INFO, "Unknown element type");
4001
4002 fail:
4003         rte_free(switch_config);
4004
4005         return ret;
4006 }
4007
4008 static int
4009 i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
4010                         uint32_t num)
4011 {
4012         struct pool_entry *entry;
4013
4014         if (pool == NULL || num == 0)
4015                 return -EINVAL;
4016
4017         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4018         if (entry == NULL) {
4019                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4020                 return -ENOMEM;
4021         }
4022
4023         /* Initialize the queue heap */
4024         pool->num_free = num;
4025         pool->num_alloc = 0;
4026         pool->base = base;
4027         LIST_INIT(&pool->alloc_list);
4028         LIST_INIT(&pool->free_list);
4029
4030         /* Initialize the element */
4031         entry->base = 0;
4032         entry->len = num;
4033
4034         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4035         return 0;
4036 }
4037
4038 static void
4039 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4040 {
4041         struct pool_entry *entry, *next_entry;
4042
4043         if (pool == NULL)
4044                 return;
4045
4046         for (entry = LIST_FIRST(&pool->alloc_list);
4047                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4048                         entry = next_entry) {
4049                 LIST_REMOVE(entry, next);
4050                 rte_free(entry);
4051         }
4052
4053         for (entry = LIST_FIRST(&pool->free_list);
4054                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4055                         entry = next_entry) {
4056                 LIST_REMOVE(entry, next);
4057                 rte_free(entry);
4058         }
4059
4060         pool->num_free = 0;
4061         pool->num_alloc = 0;
4062         pool->base = 0;
4063         LIST_INIT(&pool->alloc_list);
4064         LIST_INIT(&pool->free_list);
4065 }
4066
4067 static int
4068 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4069                        uint32_t base)
4070 {
4071         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4072         uint32_t pool_offset;
4073         int insert;
4074
4075         if (pool == NULL) {
4076                 PMD_DRV_LOG(ERR, "Invalid parameter");
4077                 return -EINVAL;
4078         }
4079
4080         pool_offset = base - pool->base;
4081         /* Lookup in alloc list */
4082         LIST_FOREACH(entry, &pool->alloc_list, next) {
4083                 if (entry->base == pool_offset) {
4084                         valid_entry = entry;
4085                         LIST_REMOVE(entry, next);
4086                         break;
4087                 }
4088         }
4089
4090         /* Not found, return */
4091         if (valid_entry == NULL) {
4092                 PMD_DRV_LOG(ERR, "Failed to find entry");
4093                 return -EINVAL;
4094         }
4095
4096         /**
4097          * Found it; move it to the free list and try to merge.
4098          * To make merging easier, the free list is kept sorted by base.
4099          * Find the adjacent prev and next entries.
4100          */
4101         prev = next = NULL;
4102         LIST_FOREACH(entry, &pool->free_list, next) {
4103                 if (entry->base > valid_entry->base) {
4104                         next = entry;
4105                         break;
4106                 }
4107                 prev = entry;
4108         }
4109
4110         insert = 0;
4111         /* Try to merge with the next one */
4112         if (next != NULL) {
4113                 /* Merge with next one */
4114                 if (valid_entry->base + valid_entry->len == next->base) {
4115                         next->base = valid_entry->base;
4116                         next->len += valid_entry->len;
4117                         rte_free(valid_entry);
4118                         valid_entry = next;
4119                         insert = 1;
4120                 }
4121         }
4122
4123         if (prev != NULL) {
4124                 /* Merge with previous one */
4125                 if (prev->base + prev->len == valid_entry->base) {
4126                         prev->len += valid_entry->len;
4127                         /* If it merged with the next one, remove the next node */
4128                         if (insert == 1) {
4129                                 LIST_REMOVE(valid_entry, next);
4130                                 rte_free(valid_entry);
4131                         } else {
4132                                 rte_free(valid_entry);
4133                                 insert = 1;
4134                         }
4135                 }
4136         }
4137
4138         /* No entry found to merge with, insert it */
4139         if (insert == 0) {
4140                 if (prev != NULL)
4141                         LIST_INSERT_AFTER(prev, valid_entry, next);
4142                 else if (next != NULL)
4143                         LIST_INSERT_BEFORE(next, valid_entry, next);
4144                 else /* It's empty list, insert to head */
4145                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4146         }
4147
4148         pool->num_free += valid_entry->len;
4149         pool->num_alloc -= valid_entry->len;
4150
4151         return 0;
4152 }
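/*
 * Worked merge example (hypothetical numbers): with pool->base = 0, a
 * free list of {base 0, len 4} and {base 12, len 4}, and an allocated
 * entry {base 4, len 8}, freeing base 4 first merges with the next
 * entry (12 == 4 + 8), giving {base 4, len 12}, then with the previous
 * one (4 == 0 + 4), leaving a single free entry {base 0, len 16}.
 */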
4153
4154 static int
4155 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4156                        uint16_t num)
4157 {
4158         struct pool_entry *entry, *valid_entry;
4159
4160         if (pool == NULL || num == 0) {
4161                 PMD_DRV_LOG(ERR, "Invalid parameter");
4162                 return -EINVAL;
4163         }
4164
4165         if (pool->num_free < num) {
4166                 PMD_DRV_LOG(ERR, "Not enough resources: requested %u, available %u",
4167                             num, pool->num_free);
4168                 return -ENOMEM;
4169         }
4170
4171         valid_entry = NULL;
4172         /* Look up the free list and find the best-fit entry */
4173         LIST_FOREACH(entry, &pool->free_list, next) {
4174                 if (entry->len >= num) {
4175                         /* Exact fit found */
4176                         if (entry->len == num) {
4177                                 valid_entry = entry;
4178                                 break;
4179                         }
4180                         if (valid_entry == NULL || valid_entry->len > entry->len)
4181                                 valid_entry = entry;
4182                 }
4183         }
4184
4185         /* No entry found to satisfy the request, return */
4186         if (valid_entry == NULL) {
4187                 PMD_DRV_LOG(ERR, "No valid entry found");
4188                 return -ENOMEM;
4189         }
4190         /**
4191          * The entry has exactly the requested number of queues;
4192          * remove it from the free list.
4193          */
4194         if (valid_entry->len == num) {
4195                 LIST_REMOVE(valid_entry, next);
4196         } else {
4197                 /**
4198                  * The entry has more queues than requested; create a
4199                  * new entry for the alloc list and shrink the base and
4200                  * length of the free-list entry accordingly.
4201                  */
4202                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4203                 if (entry == NULL) {
4204                         PMD_DRV_LOG(ERR,
4205                                 "Failed to allocate memory for resource pool");
4206                         return -ENOMEM;
4207                 }
4208                 entry->base = valid_entry->base;
4209                 entry->len = num;
4210                 valid_entry->base += num;
4211                 valid_entry->len -= num;
4212                 valid_entry = entry;
4213         }
4214
4215         /* Insert it into alloc list, not sorted */
4216         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4217
4218         pool->num_free -= valid_entry->len;
4219         pool->num_alloc += valid_entry->len;
4220
4221         return valid_entry->base + pool->base;
4222 }
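/*
 * Illustrative usage sketch only, not part of the driver: the typical
 * life cycle of a resource pool, as used for the PF queue-pair and
 * MSI-X pools. The base/size values are made up for the example; note
 * that alloc uses a best-fit search, splitting a larger free entry
 * when no exact fit exists.
 */
static __attribute__((unused)) void
i40e_res_pool_usage_sketch(void)
{
        struct i40e_res_pool_info pool;
        int base;

        i40e_res_pool_init(&pool, 0, 64);        /* 64 resources at base 0 */
        base = i40e_res_pool_alloc(&pool, 8);    /* splits the 64-entry */
        if (base >= 0)
                i40e_res_pool_free(&pool, base); /* merges back to 64 */
        i40e_res_pool_destroy(&pool);
}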
4223
4224 /**
4225  * bitmap_is_subset - Check whether src2 is a subset of src1
4226  **/
4227 static inline int
4228 bitmap_is_subset(uint8_t src1, uint8_t src2)
4229 {
4230         return !((src1 ^ src2) & src2);
4231 }
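/*
 * Worked example: (src1 ^ src2) & src2 keeps exactly the bits of src2
 * that are missing from src1, so bitmap_is_subset(0x0F, 0x05) == 1
 * (every bit of 0x05 is inside 0x0F), while
 * bitmap_is_subset(0x0F, 0x15) == 0 (bit 4 of 0x15 falls outside 0x0F).
 */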
4232
4233 static enum i40e_status_code
4234 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4235 {
4236         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4237
4238         /* If DCB is not supported, only default TC is supported */
4239         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4240                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
4241                 return I40E_NOT_SUPPORTED;
4242         }
4243
4244         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4245                 PMD_DRV_LOG(ERR,
4246                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
4247                         hw->func_caps.enabled_tcmap, enabled_tcmap);
4248                 return I40E_NOT_SUPPORTED;
4249         }
4250         return I40E_SUCCESS;
4251 }
4252
4253 int
4254 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4255                                 struct i40e_vsi_vlan_pvid_info *info)
4256 {
4257         struct i40e_hw *hw;
4258         struct i40e_vsi_context ctxt;
4259         uint8_t vlan_flags = 0;
4260         int ret;
4261
4262         if (vsi == NULL || info == NULL) {
4263                 PMD_DRV_LOG(ERR, "invalid parameters");
4264                 return I40E_ERR_PARAM;
4265         }
4266
4267         if (info->on) {
4268                 vsi->info.pvid = info->config.pvid;
4269                 /**
4270                  * If insert pvid is enabled, only tagged pkts are
4271                  * allowed to be sent out.
4272                  */
4273                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
4274                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4275         } else {
4276                 vsi->info.pvid = 0;
4277                 if (info->config.reject.tagged == 0)
4278                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4279
4280                 if (info->config.reject.untagged == 0)
4281                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
4282         }
4283         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
4284                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
4285         vsi->info.port_vlan_flags |= vlan_flags;
4286         vsi->info.valid_sections =
4287                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4288         memset(&ctxt, 0, sizeof(ctxt));
4289         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4290         ctxt.seid = vsi->seid;
4291
4292         hw = I40E_VSI_TO_HW(vsi);
4293         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4294         if (ret != I40E_SUCCESS)
4295                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
4296
4297         return ret;
4298 }
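/*
 * Worked example of the flag selection above (derived from the code,
 * values illustrative): with info->on = 0 and both reject.tagged and
 * reject.untagged cleared, vlan_flags becomes
 * MODE_TAGGED | MODE_UNTAGGED, so both kinds of packets may be sent;
 * setting reject.tagged = 1 leaves only MODE_UNTAGGED, while
 * info->on = 1 forces INSERT_PVID | MODE_TAGGED regardless of the
 * reject flags.
 */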
4299
4300 static int
4301 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4302 {
4303         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4304         int i, ret;
4305         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
4306
4307         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4308         if (ret != I40E_SUCCESS)
4309                 return ret;
4310
4311         if (!vsi->seid) {
4312                 PMD_DRV_LOG(ERR, "seid not valid");
4313                 return -EINVAL;
4314         }
4315
4316         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
4317         tc_bw_data.tc_valid_bits = enabled_tcmap;
4318         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4319                 tc_bw_data.tc_bw_credits[i] =
4320                         (enabled_tcmap & (1 << i)) ? 1 : 0;
4321
4322         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
4323         if (ret != I40E_SUCCESS) {
4324                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
4325                 return ret;
4326         }
4327
4328         (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
4329                                         sizeof(vsi->info.qs_handle));
4330         return I40E_SUCCESS;
4331 }
4332
4333 static enum i40e_status_code
4334 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
4335                                  struct i40e_aqc_vsi_properties_data *info,
4336                                  uint8_t enabled_tcmap)
4337 {
4338         enum i40e_status_code ret;
4339         int i, total_tc = 0;
4340         uint16_t qpnum_per_tc, bsf, qp_idx;
4341
4342         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4343         if (ret != I40E_SUCCESS)
4344                 return ret;
4345
4346         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4347                 if (enabled_tcmap & (1 << i))
4348                         total_tc++;
4349         if (total_tc == 0)
4350                 total_tc = 1;
4351         vsi->enabled_tc = enabled_tcmap;
4352
4353         /* Number of queues per enabled TC */
4354         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
4355         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
4356         bsf = rte_bsf32(qpnum_per_tc);
4357
4358         /* Adjust the queue number to actual queues that can be applied */
4359         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4360                 vsi->nb_qps = qpnum_per_tc * total_tc;
4361
4362         /**
4363          * Configure TC and queue mapping parameters: for each enabled
4364          * TC, allocate qpnum_per_tc queues to its traffic; disabled TCs
4365          * are served by the default queue.
4366          */
4367         qp_idx = 0;
4368         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4369                 if (vsi->enabled_tc & (1 << i)) {
4370                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4371                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4372                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4373                         qp_idx += qpnum_per_tc;
4374                 } else
4375                         info->tc_mapping[i] = 0;
4376         }
4377
4378         /* Associate queue number with VSI */
4379         if (vsi->type == I40E_VSI_SRIOV) {
4380                 info->mapping_flags |=
4381                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4382                 for (i = 0; i < vsi->nb_qps; i++)
4383                         info->queue_mapping[i] =
4384                                 rte_cpu_to_le_16(vsi->base_queue + i);
4385         } else {
4386                 info->mapping_flags |=
4387                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4388                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4389         }
4390         info->valid_sections |=
4391                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4392
4393         return I40E_SUCCESS;
4394 }
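/*
 * Worked mapping example (hypothetical numbers, abbreviating the
 * I40E_AQ_VSI_TC_QUE_* shift names): with nb_qps = 16 and two TCs
 * enabled (tcmap 0x3), qpnum_per_tc = 8 and bsf = 3, so
 *   tc_mapping[0] = (0 << QUE_OFFSET_SHIFT) | (3 << QUE_NUMBER_SHIFT)
 *   tc_mapping[1] = (8 << QUE_OFFSET_SHIFT) | (3 << QUE_NUMBER_SHIFT)
 * i.e. each TC owns a run of 2^3 = 8 queues starting at its offset.
 */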
4395
4396 static int
4397 i40e_veb_release(struct i40e_veb *veb)
4398 {
4399         struct i40e_vsi *vsi;
4400         struct i40e_hw *hw;
4401
4402         if (veb == NULL)
4403                 return -EINVAL;
4404
4405         if (!TAILQ_EMPTY(&veb->head)) {
4406                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4407                 return -EACCES;
4408         }
4409         /* associate_vsi field is NULL for floating VEB */
4410         if (veb->associate_vsi != NULL) {
4411                 vsi = veb->associate_vsi;
4412                 hw = I40E_VSI_TO_HW(vsi);
4413
4414                 vsi->uplink_seid = veb->uplink_seid;
4415                 vsi->veb = NULL;
4416         } else {
4417                 veb->associate_pf->main_vsi->floating_veb = NULL;
4418                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4419         }
4420
4421         i40e_aq_delete_element(hw, veb->seid, NULL);
4422         rte_free(veb);
4423         return I40E_SUCCESS;
4424 }
4425
4426 /* Setup a veb */
4427 static struct i40e_veb *
4428 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4429 {
4430         struct i40e_veb *veb;
4431         int ret;
4432         struct i40e_hw *hw;
4433
4434         if (pf == NULL) {
4435                 PMD_DRV_LOG(ERR,
4436                             "veb setup failed, associated PF shouldn't be NULL");
4437                 return NULL;
4438         }
4439         hw = I40E_PF_TO_HW(pf);
4440
4441         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4442         if (!veb) {
4443                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4444                 goto fail;
4445         }
4446
4447         veb->associate_vsi = vsi;
4448         veb->associate_pf = pf;
4449         TAILQ_INIT(&veb->head);
4450         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4451
4452         /* create floating veb if vsi is NULL */
4453         if (vsi != NULL) {
4454                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4455                                       I40E_DEFAULT_TCMAP, false,
4456                                       &veb->seid, false, NULL);
4457         } else {
4458                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4459                                       true, &veb->seid, false, NULL);
4460         }
4461
4462         if (ret != I40E_SUCCESS) {
4463                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4464                             hw->aq.asq_last_status);
4465                 goto fail;
4466         }
4467         veb->enabled_tc = I40E_DEFAULT_TCMAP;
4468
4469         /* get statistics index */
4470         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4471                                 &veb->stats_idx, NULL, NULL, NULL);
4472         if (ret != I40E_SUCCESS) {
4473                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
4474                             hw->aq.asq_last_status);
4475                 goto fail;
4476         }
4477         /* Get VEB bandwidth, to be implemented */
4478         /* Now associated vsi binding to the VEB, set uplink to this VEB */
4479         if (vsi)
4480                 vsi->uplink_seid = veb->seid;
4481
4482         return veb;
4483 fail:
4484         rte_free(veb);
4485         return NULL;
4486 }
4487
4488 int
4489 i40e_vsi_release(struct i40e_vsi *vsi)
4490 {
4491         struct i40e_pf *pf;
4492         struct i40e_hw *hw;
4493         struct i40e_vsi_list *vsi_list;
4494         void *temp;
4495         int ret;
4496         struct i40e_mac_filter *f;
4497         uint16_t user_param;
4498
4499         if (!vsi)
4500                 return I40E_SUCCESS;
4501
4502         if (!vsi->adapter)
4503                 return -EFAULT;
4504
4505         user_param = vsi->user_param;
4506
4507         pf = I40E_VSI_TO_PF(vsi);
4508         hw = I40E_VSI_TO_HW(vsi);
4509
4510         /* VSI has child to attach, release child first */
4511         if (vsi->veb) {
4512                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
4513                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4514                                 return -1;
4515                 }
4516                 i40e_veb_release(vsi->veb);
4517         }
4518
4519         if (vsi->floating_veb) {
4520                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
4521                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4522                                 return -1;
4523                 }
4524         }
4525
4526         /* Remove all macvlan filters of the VSI */
4527         i40e_vsi_remove_all_macvlan_filter(vsi);
4528         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
4529                 rte_free(f);
4530
4531         if (vsi->type != I40E_VSI_MAIN &&
4532             ((vsi->type != I40E_VSI_SRIOV) ||
4533             !pf->floating_veb_list[user_param])) {
4534                 /* Remove vsi from parent's sibling list */
4535                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
4536                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
4537                         return I40E_ERR_PARAM;
4538                 }
4539                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
4540                                 &vsi->sib_vsi_list, list);
4541
4542                 /* Remove all switch element of the VSI */
4543                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4544                 if (ret != I40E_SUCCESS)
4545                         PMD_DRV_LOG(ERR, "Failed to delete element");
4546         }
4547
4548         if ((vsi->type == I40E_VSI_SRIOV) &&
4549             pf->floating_veb_list[user_param]) {
4550                 /* Remove vsi from parent's sibling list */
4551                 if (vsi->parent_vsi == NULL ||
4552                     vsi->parent_vsi->floating_veb == NULL) {
4553                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
4554                         return I40E_ERR_PARAM;
4555                 }
4556                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
4557                              &vsi->sib_vsi_list, list);
4558
4559                 /* Remove all switch element of the VSI */
4560                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4561                 if (ret != I40E_SUCCESS)
4562                         PMD_DRV_LOG(ERR, "Failed to delete element");
4563         }
4564
4565         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4566
4567         if (vsi->type != I40E_VSI_SRIOV)
4568                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
4569         rte_free(vsi);
4570
4571         return I40E_SUCCESS;
4572 }
4573
4574 static int
4575 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
4576 {
4577         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4578         struct i40e_aqc_remove_macvlan_element_data def_filter;
4579         struct i40e_mac_filter_info filter;
4580         int ret;
4581
4582         if (vsi->type != I40E_VSI_MAIN)
4583                 return I40E_ERR_CONFIG;
4584         memset(&def_filter, 0, sizeof(def_filter));
4585         (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
4586                                         ETH_ADDR_LEN);
4587         def_filter.vlan_tag = 0;
4588         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4589                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4590         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
4591         if (ret != I40E_SUCCESS) {
4592                 struct i40e_mac_filter *f;
4593                 struct ether_addr *mac;
4594
4595                 PMD_DRV_LOG(DEBUG,
4596                             "Cannot remove the default macvlan filter");
4597                 /* The permanent MAC address needs to be added to the mac list */
4598                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4599                 if (f == NULL) {
4600                         PMD_DRV_LOG(ERR, "failed to allocate memory");
4601                         return I40E_ERR_NO_MEMORY;
4602                 }
4603                 mac = &f->mac_info.mac_addr;
4604                 (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
4605                                 ETH_ADDR_LEN);
4606                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4607                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4608                 vsi->mac_num++;
4609
4610                 return ret;
4611         }
4612         (void)rte_memcpy(&filter.mac_addr,
4613                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
4614         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4615         return i40e_vsi_add_mac(vsi, &filter);
4616 }
4617
4618 /*
4619  * i40e_vsi_get_bw_config - Query VSI BW Information
4620  * @vsi: the VSI to be queried
4621  *
4622  * Returns 0 on success, negative value on failure
4623  */
4624 static enum i40e_status_code
4625 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
4626 {
4627         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
4628         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
4629         struct i40e_hw *hw = &vsi->adapter->hw;
4630         i40e_status ret;
4631         int i;
4632         uint32_t bw_max;
4633
4634         memset(&bw_config, 0, sizeof(bw_config));
4635         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4636         if (ret != I40E_SUCCESS) {
4637                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
4638                             hw->aq.asq_last_status);
4639                 return ret;
4640         }
4641
4642         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
4643         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
4644                                         &ets_sla_config, NULL);
4645         if (ret != I40E_SUCCESS) {
4646                 PMD_DRV_LOG(ERR,
4647                         "VSI failed to get TC bandwidth configuration %u",
4648                         hw->aq.asq_last_status);
4649                 return ret;
4650         }
4651
4652         /* store and print out BW info */
4653         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
4654         vsi->bw_info.bw_max = bw_config.max_bw;
4655         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
4656         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
4657         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
4658                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
4659                      I40E_16_BIT_WIDTH);
4660         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4661                 vsi->bw_info.bw_ets_share_credits[i] =
4662                                 ets_sla_config.share_credits[i];
4663                 vsi->bw_info.bw_ets_credits[i] =
4664                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
4665                 /* 4 bits per TC, 4th bit is reserved */
4666                 vsi->bw_info.bw_ets_max[i] =
4667                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
4668                                   RTE_LEN2MASK(3, uint8_t));
4669                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
4670                             vsi->bw_info.bw_ets_share_credits[i]);
4671                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
4672                             vsi->bw_info.bw_ets_credits[i]);
4673                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
4674                             vsi->bw_info.bw_ets_max[i]);
4675         }
4676
4677         return I40E_SUCCESS;
4678 }
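/*
 * Worked decode example (hypothetical value): if ets_sla_config
 * tc_bw_max[0] = 0x4321 and tc_bw_max[1] = 0, then bw_max = 0x4321 and
 * the per-TC nibbles give bw_ets_max[0] = 1, [1] = 2, [2] = 3 and
 * [3] = 4, each masked with RTE_LEN2MASK(3, uint8_t) = 0x07 since the
 * 4th bit of every nibble is reserved.
 */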
4679
4680 /* i40e_enable_pf_lb
4681  * @pf: pointer to the pf structure
4682  *
4683  * allow loopback on pf
4684  */
4685 static inline void
4686 i40e_enable_pf_lb(struct i40e_pf *pf)
4687 {
4688         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4689         struct i40e_vsi_context ctxt;
4690         int ret;
4691
4692         /* Use the FW API if FW >= v5.0 */
4693         if (hw->aq.fw_maj_ver < 5) {
4694                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
4695                 return;
4696         }
4697
4698         memset(&ctxt, 0, sizeof(ctxt));
4699         ctxt.seid = pf->main_vsi_seid;
4700         ctxt.pf_num = hw->pf_id;
4701         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4702         if (ret) {
4703                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
4704                             ret, hw->aq.asq_last_status);
4705                 return;
4706         }
4707         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4708         ctxt.info.valid_sections =
4709                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4710         ctxt.info.switch_id |=
4711                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4712
4713         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4714         if (ret)
4715                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
4716                             hw->aq.asq_last_status);
4717 }
4718
4719 /* Setup a VSI */
4720 struct i40e_vsi *
4721 i40e_vsi_setup(struct i40e_pf *pf,
4722                enum i40e_vsi_type type,
4723                struct i40e_vsi *uplink_vsi,
4724                uint16_t user_param)
4725 {
4726         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4727         struct i40e_vsi *vsi;
4728         struct i40e_mac_filter_info filter;
4729         int ret;
4730         struct i40e_vsi_context ctxt;
4731         struct ether_addr broadcast =
4732                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
4733
4734         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
4735             uplink_vsi == NULL) {
4736                 PMD_DRV_LOG(ERR,
4737                         "VSI setup failed, VSI link shouldn't be NULL");
4738                 return NULL;
4739         }
4740
4741         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
4742                 PMD_DRV_LOG(ERR,
4743                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
4744                 return NULL;
4745         }
4746
4747         /* Two situations:
4748          * 1. type is not MAIN and uplink vsi is not NULL:
4749          *    if the uplink vsi has no VEB yet, create one first in its veb field.
4750          * 2. type is SRIOV and the uplink is NULL:
4751          *    if there is no floating VEB yet, create one in the floating veb field.
4752          */
4753
4754         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
4755             uplink_vsi->veb == NULL) {
4756                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
4757
4758                 if (uplink_vsi->veb == NULL) {
4759                         PMD_DRV_LOG(ERR, "VEB setup failed");
4760                         return NULL;
4761                 }
4762                 /* set ALLOWLOOPBACK on PF when the VEB is created */
4763                 i40e_enable_pf_lb(pf);
4764         }
4765
4766         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
4767             pf->main_vsi->floating_veb == NULL) {
4768                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
4769
4770                 if (pf->main_vsi->floating_veb == NULL) {
4771                         PMD_DRV_LOG(ERR, "VEB setup failed");
4772                         return NULL;
4773                 }
4774         }
4775
4776         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
4777         if (!vsi) {
4778                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
4779                 return NULL;
4780         }
4781         TAILQ_INIT(&vsi->mac_list);
4782         vsi->type = type;
4783         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
4784         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
4785         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
4786         vsi->user_param = user_param;
4787         vsi->vlan_anti_spoof_on = 0;
4788         vsi->vlan_filter_on = 0;
4789         /* Allocate queues */
4790         switch (vsi->type) {
4791         case I40E_VSI_MAIN  :
4792                 vsi->nb_qps = pf->lan_nb_qps;
4793                 break;
4794         case I40E_VSI_SRIOV :
4795                 vsi->nb_qps = pf->vf_nb_qps;
4796                 break;
4797         case I40E_VSI_VMDQ2:
4798                 vsi->nb_qps = pf->vmdq_nb_qps;
4799                 break;
4800         case I40E_VSI_FDIR:
4801                 vsi->nb_qps = pf->fdir_nb_qps;
4802                 break;
4803         default:
4804                 goto fail_mem;
4805         }
4806         /*
4807          * The filter status descriptor is reported on rx queue 0,
4808          * while the tx queue used for fdir filter programming has no
4809          * such constraint and can be any non-zero queue.
4810          * To keep it simple, the FDIR vsi uses queue pair 0.
4811          * To make sure it gets queue pair 0, its queue allocation
4812          * needs to be done before this function is called.
4813          */
4814         if (type != I40E_VSI_FDIR) {
4815                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
4816                 if (ret < 0) {
4817                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
4818                                     vsi->seid, ret);
4819                         goto fail_mem;
4820                 }
4821                 vsi->base_queue = ret;
4822         } else
4823                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
4824
4825         /* VF has MSIX interrupt in VF range, don't allocate here */
4826         if (type == I40E_VSI_MAIN) {
4827                 ret = i40e_res_pool_alloc(&pf->msix_pool,
4828                                           RTE_MIN(vsi->nb_qps,
4829                                                   RTE_MAX_RXTX_INTR_VEC_ID));
4830                 if (ret < 0) {
4831                         PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
4832                                     vsi->seid, ret);
4833                         goto fail_queue_alloc;
4834                 }
4835                 vsi->msix_intr = ret;
4836                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
4837         } else if (type != I40E_VSI_SRIOV) {
4838                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
4839                 if (ret < 0) {
4840                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
4841                         goto fail_queue_alloc;
4842                 }
4843                 vsi->msix_intr = ret;
4844                 vsi->nb_msix = 1;
4845         } else {
4846                 vsi->msix_intr = 0;
4847                 vsi->nb_msix = 0;
4848         }
4849
4850         /* Add VSI */
4851         if (type == I40E_VSI_MAIN) {
4852                 /* For the main VSI, no need to add it since it's the default one */
4853                 vsi->uplink_seid = pf->mac_seid;
4854                 vsi->seid = pf->main_vsi_seid;
4855                 /* Bind queues with specific MSIX interrupt */
4856                 /**
4857                  * At least 2 interrupts are needed: one for misc causes,
4858                  * which will be enabled from the OS side, and another for
4859                  * binding queues to the interrupt from the device side only.
4860                  */
4861
4862                 /* Get default VSI parameters from hardware */
4863                 memset(&ctxt, 0, sizeof(ctxt));
4864                 ctxt.seid = vsi->seid;
4865                 ctxt.pf_num = hw->pf_id;
4866                 ctxt.uplink_seid = vsi->uplink_seid;
4867                 ctxt.vf_num = 0;
4868                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4869                 if (ret != I40E_SUCCESS) {
4870                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
4871                         goto fail_msix_alloc;
4872                 }
4873                 (void)rte_memcpy(&vsi->info, &ctxt.info,
4874                         sizeof(struct i40e_aqc_vsi_properties_data));
4875                 vsi->vsi_id = ctxt.vsi_number;
4876                 vsi->info.valid_sections = 0;
4877
4878                 /* Configure tc, enabled TC0 only */
4879                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
4880                         I40E_SUCCESS) {
4881                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
4882                         goto fail_msix_alloc;
4883                 }
4884
4885                 /* TC, queue mapping */
4886                 memset(&ctxt, 0, sizeof(ctxt));
4887                 vsi->info.valid_sections |=
4888                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4889                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
4890                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
4891                 (void)rte_memcpy(&ctxt.info, &vsi->info,
4892                         sizeof(struct i40e_aqc_vsi_properties_data));
4893                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4894                                                 I40E_DEFAULT_TCMAP);
4895                 if (ret != I40E_SUCCESS) {
4896                         PMD_DRV_LOG(ERR,
4897                                 "Failed to configure TC queue mapping");
4898                         goto fail_msix_alloc;
4899                 }
4900                 ctxt.seid = vsi->seid;
4901                 ctxt.pf_num = hw->pf_id;
4902                 ctxt.uplink_seid = vsi->uplink_seid;
4903                 ctxt.vf_num = 0;
4904
4905                 /* Update VSI parameters */
4906                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4907                 if (ret != I40E_SUCCESS) {
4908                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
4909                         goto fail_msix_alloc;
4910                 }
4911
4912                 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
4913                                                 sizeof(vsi->info.tc_mapping));
4914                 (void)rte_memcpy(&vsi->info.queue_mapping,
4915                                 &ctxt.info.queue_mapping,
4916                         sizeof(vsi->info.queue_mapping));
4917                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
4918                 vsi->info.valid_sections = 0;
4919
4920                 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
4921                                 ETH_ADDR_LEN);
4922
4923                 /**
4924                  * Updating the default filter settings is necessary to
4925                  * prevent reception of tagged packets.
4926                  * Some old firmware configurations load a default macvlan
4927                  * filter which accepts both tagged and untagged packets.
4928                  * The update replaces it with a normal filter if needed.
4929                  * For NVM 4.2.2 or later, the update is no longer needed;
4930                  * firmware with a correct configuration loads the default
4931                  * macvlan filter, which is expected and cannot be removed.
4932                  */
4933                 i40e_update_default_filter_setting(vsi);
4934                 i40e_config_qinq(hw, vsi);
4935         } else if (type == I40E_VSI_SRIOV) {
4936                 memset(&ctxt, 0, sizeof(ctxt));
4937                 /**
4938                  * For other VSIs, the uplink_seid equals the uplink VSI's
4939                  * uplink_seid since they share the same VEB
4940                  */
4941                 if (uplink_vsi == NULL)
4942                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
4943                 else
4944                         vsi->uplink_seid = uplink_vsi->uplink_seid;
4945                 ctxt.pf_num = hw->pf_id;
4946                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
4947                 ctxt.uplink_seid = vsi->uplink_seid;
4948                 ctxt.connection_type = 0x1;
4949                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
4950
4951                 /* Use the VEB configuration if FW >= v5.0 */
4952                 if (hw->aq.fw_maj_ver >= 5) {
4953                         /* Configure switch ID */
4954                         ctxt.info.valid_sections |=
4955                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4956                         ctxt.info.switch_id =
4957                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4958                 }
4959
4960                 /* Configure port/vlan */
4961                 ctxt.info.valid_sections |=
4962                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4963                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4964                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4965                                                 hw->func_caps.enabled_tcmap);
4966                 if (ret != I40E_SUCCESS) {
4967                         PMD_DRV_LOG(ERR,
4968                                 "Failed to configure TC queue mapping");
4969                         goto fail_msix_alloc;
4970                 }
4971
4972                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
4973                 ctxt.info.valid_sections |=
4974                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4975                 /**
4976                  * Since the VSI is not created yet, only configure its
4977                  * parameters; the VSI will be added below.
4978                  */
4979
4980                 i40e_config_qinq(hw, vsi);
4981         } else if (type == I40E_VSI_VMDQ2) {
4982                 memset(&ctxt, 0, sizeof(ctxt));
4983                 /*
4984                  * For other VSIs, the uplink_seid equals the uplink VSI's
4985                  * uplink_seid since they share the same VEB
4986                  */
4987                 vsi->uplink_seid = uplink_vsi->uplink_seid;
4988                 ctxt.pf_num = hw->pf_id;
4989                 ctxt.vf_num = 0;
4990                 ctxt.uplink_seid = vsi->uplink_seid;
4991                 ctxt.connection_type = 0x1;
4992                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
4993
4994                 ctxt.info.valid_sections |=
4995                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4996                 /* user_param carries a flag to enable loopback */
4997                 if (user_param) {
4998                         ctxt.info.switch_id =
4999                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5000                         ctxt.info.switch_id |=
5001                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5002                 }
5003
5004                 /* Configure port/vlan */
5005                 ctxt.info.valid_sections |=
5006                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5007                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5008                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5009                                                 I40E_DEFAULT_TCMAP);
5010                 if (ret != I40E_SUCCESS) {
5011                         PMD_DRV_LOG(ERR,
5012                                 "Failed to configure TC queue mapping");
5013                         goto fail_msix_alloc;
5014                 }
5015                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5016                 ctxt.info.valid_sections |=
5017                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5018         } else if (type == I40E_VSI_FDIR) {
5019                 memset(&ctxt, 0, sizeof(ctxt));
5020                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5021                 ctxt.pf_num = hw->pf_id;
5022                 ctxt.vf_num = 0;
5023                 ctxt.uplink_seid = vsi->uplink_seid;
5024                 ctxt.connection_type = 0x1;     /* regular data port */
5025                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5026                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5027                                                 I40E_DEFAULT_TCMAP);
5028                 if (ret != I40E_SUCCESS) {
5029                         PMD_DRV_LOG(ERR,
5030                                 "Failed to configure TC queue mapping.");
5031                         goto fail_msix_alloc;
5032                 }
5033                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5034                 ctxt.info.valid_sections |=
5035                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5036         } else {
5037                 PMD_DRV_LOG(ERR, "VSI: Other VSI types are not supported yet");
5038                 goto fail_msix_alloc;
5039         }
5040
5041         if (vsi->type != I40E_VSI_MAIN) {
5042                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5043                 if (ret != I40E_SUCCESS) {
5044                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5045                                     hw->aq.asq_last_status);
5046                         goto fail_msix_alloc;
5047                 }
5048                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5049                 vsi->info.valid_sections = 0;
5050                 vsi->seid = ctxt.seid;
5051                 vsi->vsi_id = ctxt.vsi_number;
5052                 vsi->sib_vsi_list.vsi = vsi;
5053                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5054                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5055                                           &vsi->sib_vsi_list, list);
5056                 } else {
5057                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5058                                           &vsi->sib_vsi_list, list);
5059                 }
5060         }
5061
5062         /* MAC/VLAN configuration */
5063         (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5064         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5065
5066         ret = i40e_vsi_add_mac(vsi, &filter);
5067         if (ret != I40E_SUCCESS) {
5068                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5069                 goto fail_msix_alloc;
5070         }
5071
5072         /* Get VSI BW information */
5073         i40e_vsi_get_bw_config(vsi);
5074         return vsi;
5075 fail_msix_alloc:
5076         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5077 fail_queue_alloc:
5078         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5079 fail_mem:
5080         rte_free(vsi);
5081         return NULL;
5082 }
5083
5084 /* Configure vlan filter on or off */
5085 int
5086 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5087 {
5088         int i, num;
5089         struct i40e_mac_filter *f;
5090         void *temp;
5091         struct i40e_mac_filter_info *mac_filter;
5092         enum rte_mac_filter_type desired_filter;
5093         int ret = I40E_SUCCESS;
5094
5095         if (on) {
5096                 /* Filter to match MAC and VLAN */
5097                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5098         } else {
5099                 /* Filter to match only MAC */
5100                 desired_filter = RTE_MAC_PERFECT_MATCH;
5101         }
5102
5103         num = vsi->mac_num;
5104
5105         mac_filter = rte_zmalloc("mac_filter_info_data",
5106                                  num * sizeof(*mac_filter), 0);
5107         if (mac_filter == NULL) {
5108                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5109                 return I40E_ERR_NO_MEMORY;
5110         }
5111
5112         i = 0;
5113
5114         /* Remove all existing MAC filters */
5115         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5116                 mac_filter[i] = f->mac_info;
5117                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5118                 if (ret) {
5119                         PMD_DRV_LOG(ERR, "Failed to %s vlan filter during VSI update",
5120                                     on ? "enable" : "disable");
5121                         goto DONE;
5122                 }
5123                 i++;
5124         }
5125
5126         /* Re-add all filters with the new filter type */
5127         for (i = 0; i < num; i++) {
5128                 mac_filter[i].filter_type = desired_filter;
5129                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5130                 if (ret) {
5131                         PMD_DRV_LOG(ERR, "Failed to %s vlan filter during VSI update",
5132                                     on ? "enable" : "disable");
5133                         goto DONE;
5134                 }
5135         }
5136
5137 DONE:
5138         rte_free(mac_filter);
5139         return ret;
5140 }
5141
5142 /* Configure vlan stripping on or off */
5143 int
5144 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5145 {
5146         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5147         struct i40e_vsi_context ctxt;
5148         uint8_t vlan_flags;
5149         int ret = I40E_SUCCESS;
5150
5151         /* Check if it is already on or off */
5152         if (vsi->info.valid_sections &
5153                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5154                 if (on) {
5155                         if ((vsi->info.port_vlan_flags &
5156                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5157                                 return 0; /* already on */
5158                 } else {
5159                         if ((vsi->info.port_vlan_flags &
5160                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5161                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
5162                                 return 0; /* already off */
5163                 }
5164         }
5165
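        /* Judging by the flag names: EMOD_STR_BOTH requests stripping the
         * tag to the RX descriptor, while EMOD_NOTHING leaves the packet
         * unmodified.
         */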
5166         if (on)
5167                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5168         else
5169                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5170         vsi->info.valid_sections =
5171                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5172         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5173         vsi->info.port_vlan_flags |= vlan_flags;
5174         ctxt.seid = vsi->seid;
5175         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5176         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5177         if (ret)
5178                 PMD_DRV_LOG(INFO, "Failed to %s vlan stripping during VSI update",
5179                             on ? "enable" : "disable");
5180
5181         return ret;
5182 }
5183
5184 static int
5185 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5186 {
5187         struct rte_eth_dev_data *data = dev->data;
5188         int ret;
5189         int mask = 0;
5190
5191         /* Apply vlan offload setting */
5192         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
5193         i40e_vlan_offload_set(dev, mask);
5194
5195         /* Apply double-vlan setting, not implemented yet */
5196
5197         /* Apply pvid setting */
5198         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5199                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
5200         if (ret)
5201                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
5202
5203         return ret;
5204 }
5205
5206 static int
5207 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5208 {
5209         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5210
5211         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5212 }
5213
5214 static int
5215 i40e_update_flow_control(struct i40e_hw *hw)
5216 {
5217 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5218         struct i40e_link_status link_status;
5219         uint32_t rxfc = 0, txfc = 0, reg;
5220         uint8_t an_info;
5221         int ret;
5222
5223         memset(&link_status, 0, sizeof(link_status));
5224         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5225         if (ret != I40E_SUCCESS) {
5226                 PMD_DRV_LOG(ERR, "Failed to get link status information");
5227                 goto write_reg; /* Disable flow control */
5228         }
5229
5230         an_info = hw->phy.link_info.an_info;
5231         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5232                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5233                 ret = I40E_ERR_NOT_READY;
5234                 goto write_reg; /* Disable flow control */
5235         }
5236         /**
5237          * If link auto negotiation is enabled, flow control needs to
5238          * be configured according to it
5239          */
5240         switch (an_info & I40E_LINK_PAUSE_RXTX) {
5241         case I40E_LINK_PAUSE_RXTX:
5242                 rxfc = 1;
5243                 txfc = 1;
5244                 hw->fc.current_mode = I40E_FC_FULL;
5245                 break;
5246         case I40E_AQ_LINK_PAUSE_RX:
5247                 rxfc = 1;
5248                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
5249                 break;
5250         case I40E_AQ_LINK_PAUSE_TX:
5251                 txfc = 1;
5252                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
5253                 break;
5254         default:
5255                 hw->fc.current_mode = I40E_FC_NONE;
5256                 break;
5257         }
5258
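        /* The error paths above leave rxfc and txfc at 0, so the writes
         * below disable flow control in both directions.
         */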
5259 write_reg:
5260         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
5261                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
5262         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
5263         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
5264         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
5265         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
5266
5267         return ret;
5268 }
5269
5270 /* PF setup */
5271 static int
5272 i40e_pf_setup(struct i40e_pf *pf)
5273 {
5274         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5275         struct i40e_filter_control_settings settings;
5276         struct i40e_vsi *vsi;
5277         int ret;
5278
5279         /* Clear all stats counters */
5280         pf->offset_loaded = FALSE;
5281         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
5282         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
5283         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
5284         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
5285
5286         ret = i40e_pf_get_switch_config(pf);
5287         if (ret != I40E_SUCCESS) {
5288                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
5289                 return ret;
5290         }
5291         if (pf->flags & I40E_FLAG_FDIR) {
5292                 /* Allocate queues first, so that FDIR can use queue pair 0 */
5293                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
5294                 if (ret != I40E_FDIR_QUEUE_ID) {
5295                         PMD_DRV_LOG(ERR,
5296                                 "queue allocation failed for FDIR: ret = %d",
5297                                 ret);
5298                         pf->flags &= ~I40E_FLAG_FDIR;
5299                 }
5300         }
5301         /* main VSI setup */
5302         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
5303         if (!vsi) {
5304                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
5305                 return I40E_ERR_NOT_READY;
5306         }
5307         pf->main_vsi = vsi;
5308
5309         /* Configure filter control */
5310         memset(&settings, 0, sizeof(settings));
5311         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
5312                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
5313         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
5314                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
5315         else {
5316                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
5317                         hw->func_caps.rss_table_size);
5318                 return I40E_ERR_PARAM;
5319         }
5320         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
5321                 hw->func_caps.rss_table_size);
5322         pf->hash_lut_size = hw->func_caps.rss_table_size;
5323
5324         /* Enable ethtype and macvlan filters */
5325         settings.enable_ethtype = TRUE;
5326         settings.enable_macvlan = TRUE;
5327         ret = i40e_set_filter_control(hw, &settings);
5328         if (ret)
5329                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
5330                                                                 ret);
5331
5332         /* Update flow control according to the auto negotiation */
5333         i40e_update_flow_control(hw);
5334
5335         return I40E_SUCCESS;
5336 }
5337
5338 int
5339 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5340 {
5341         uint32_t reg;
5342         uint16_t j;
5343
5344         /**
5345          * Set or clear the TX Queue Disable flags,
5346          * as required by hardware.
5347          */
5348         i40e_pre_tx_queue_cfg(hw, q_idx, on);
5349         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
5350
5351         /* Wait until the request is finished */
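        /* The request is complete once the QENA_REQ and QENA_STAT bits
         * agree, i.e. no enable/disable transition is still in flight.
         */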
5352         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5353                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5354                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5355                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5356                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
5357                                                         & 0x1))) {
5358                         break;
5359                 }
5360         }
5361         if (on) {
5362                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
5363                         return I40E_SUCCESS; /* already on, skip next steps */
5364
5365                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
5366                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
5367         } else {
5368                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5369                         return I40E_SUCCESS; /* already off, skip next steps */
5370                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
5371         }
5372         /* Write the register */
5373         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
5374         /* Check the result */
5375         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5376                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5377                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5378                 if (on) {
5379                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5380                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
5381                                 break;
5382                 } else {
5383                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5384                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5385                                 break;
5386                 }
5387         }
5388         /* Check if it is timeout */
5389         if (j >= I40E_CHK_Q_ENA_COUNT) {
5390                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5391                             (on ? "enable" : "disable"), q_idx);
5392                 return I40E_ERR_TIMEOUT;
5393         }
5394
5395         return I40E_SUCCESS;
5396 }
5397
5398 /* Switch on or off the tx queues */
5399 static int
5400 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5401 {
5402         struct rte_eth_dev_data *dev_data = pf->dev_data;
5403         struct i40e_tx_queue *txq;
5404         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5405         uint16_t i;
5406         int ret;
5407
5408         for (i = 0; i < dev_data->nb_tx_queues; i++) {
5409                 txq = dev_data->tx_queues[i];
5410                 /* Skip the queue if it is not configured, or if it is
5411                  * deferred-start and queues are being started globally */
5412                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5413                         continue;
5414                 if (on)
5415                         ret = i40e_dev_tx_queue_start(dev, i);
5416                 else
5417                         ret = i40e_dev_tx_queue_stop(dev, i);
5418                 if (ret != I40E_SUCCESS)
5419                         return ret;
5420         }
5421
5422         return I40E_SUCCESS;
5423 }
5424
5425 int
5426 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5427 {
5428         uint32_t reg;
5429         uint16_t j;
5430
5431         /* Wait until the request is finished */
5432         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5433                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5434                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5435                 if (!(((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5436                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1)))
5437                         break;
5438         }
5439
5440         if (on) {
5441                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5442                         return I40E_SUCCESS; /* Already on, skip next steps */
5443                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5444         } else {
5445                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5446                         return I40E_SUCCESS; /* Already off, skip next steps */
5447                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5448         }
5449
5450         /* Write the register */
5451         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5452         /* Check the result */
5453         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5454                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5455                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5456                 if (on) {
5457                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5458                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
5459                                 break;
5460                 } else {
5461                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5462                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5463                                 break;
5464                 }
5465         }
5466
5467         /* Check if it is timeout */
5468         if (j >= I40E_CHK_Q_ENA_COUNT) {
5469                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
5470                             (on ? "enable" : "disable"), q_idx);
5471                 return I40E_ERR_TIMEOUT;
5472         }
5473
5474         return I40E_SUCCESS;
5475 }
5476 /* Switch on or off the rx queues */
5477 static int
5478 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
5479 {
5480         struct rte_eth_dev_data *dev_data = pf->dev_data;
5481         struct i40e_rx_queue *rxq;
5482         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5483         uint16_t i;
5484         int ret;
5485
5486         for (i = 0; i < dev_data->nb_rx_queues; i++) {
5487                 rxq = dev_data->rx_queues[i];
5488                 /* Skip the queue if it is not configured, or if it is
5489                  * deferred-start and queues are being started globally */
5490                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
5491                         continue;
5492                 if (on)
5493                         ret = i40e_dev_rx_queue_start(dev, i);
5494                 else
5495                         ret = i40e_dev_rx_queue_stop(dev, i);
5496                 if (ret != I40E_SUCCESS)
5497                         return ret;
5498         }
5499
5500         return I40E_SUCCESS;
5501 }
5502
5503 /* Switch on or off all the rx/tx queues */
5504 int
5505 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
5506 {
5507         int ret;
5508
5509         if (on) {
5510                 /* enable rx queues before enabling tx queues */
5511                 ret = i40e_dev_switch_rx_queues(pf, on);
5512                 if (ret) {
5513                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
5514                         return ret;
5515                 }
5516                 ret = i40e_dev_switch_tx_queues(pf, on);
5517         } else {
5518                 /* Stop tx queues before stopping rx queues */
5519                 ret = i40e_dev_switch_tx_queues(pf, on);
5520                 if (ret) {
5521                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
5522                         return ret;
5523                 }
5524                 ret = i40e_dev_switch_rx_queues(pf, on);
5525         }
5526
5527         return ret;
5528 }
5529
5530 /* Initialize VSI for TX */
5531 static int
5532 i40e_dev_tx_init(struct i40e_pf *pf)
5533 {
5534         struct rte_eth_dev_data *data = pf->dev_data;
5535         uint16_t i;
5536         uint32_t ret = I40E_SUCCESS;
5537         struct i40e_tx_queue *txq;
5538
5539         for (i = 0; i < data->nb_tx_queues; i++) {
5540                 txq = data->tx_queues[i];
5541                 if (!txq || !txq->q_set)
5542                         continue;
5543                 ret = i40e_tx_queue_init(txq);
5544                 if (ret != I40E_SUCCESS)
5545                         break;
5546         }
5547         if (ret == I40E_SUCCESS)
5548                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
5549                                      ->eth_dev);
5550
5551         return ret;
5552 }
5553
5554 /* Initialize VSI for RX */
5555 static int
5556 i40e_dev_rx_init(struct i40e_pf *pf)
5557 {
5558         struct rte_eth_dev_data *data = pf->dev_data;
5559         int ret = I40E_SUCCESS;
5560         uint16_t i;
5561         struct i40e_rx_queue *rxq;
5562
5563         i40e_pf_config_mq_rx(pf);
5564         for (i = 0; i < data->nb_rx_queues; i++) {
5565                 rxq = data->rx_queues[i];
5566                 if (!rxq || !rxq->q_set)
5567                         continue;
5568
5569                 ret = i40e_rx_queue_init(rxq);
5570                 if (ret != I40E_SUCCESS) {
5571                         PMD_DRV_LOG(ERR,
5572                                 "Failed to do RX queue initialization");
5573                         break;
5574                 }
5575         }
5576         if (ret == I40E_SUCCESS)
5577                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
5578                                      ->eth_dev);
5579
5580         return ret;
5581 }
5582
5583 static int
5584 i40e_dev_rxtx_init(struct i40e_pf *pf)
5585 {
5586         int err;
5587
5588         err = i40e_dev_tx_init(pf);
5589         if (err) {
5590                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
5591                 return err;
5592         }
5593         err = i40e_dev_rx_init(pf);
5594         if (err) {
5595                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
5596                 return err;
5597         }
5598
5599         return err;
5600 }
5601
5602 static int
5603 i40e_vmdq_setup(struct rte_eth_dev *dev)
5604 {
5605         struct rte_eth_conf *conf = &dev->data->dev_conf;
5606         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5607         int i, err, conf_vsis, j, loop;
5608         struct i40e_vsi *vsi;
5609         struct i40e_vmdq_info *vmdq_info;
5610         struct rte_eth_vmdq_rx_conf *vmdq_conf;
5611         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5612
5613         /*
5614          * Disable interrupt to avoid message from VF. Furthermore, it will
5615          * avoid race condition in VSI creation/destroy.
5616          */
5617         i40e_pf_disable_irq0(hw);
5618
5619         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
5620                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
5621                 return -ENOTSUP;
5622         }
5623
5624         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
5625         if (conf_vsis > pf->max_nb_vmdq_vsi) {
5626                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max supported: %u",
5627                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
5628                         pf->max_nb_vmdq_vsi);
5629                 return -ENOTSUP;
5630         }
5631
5632         if (pf->vmdq != NULL) {
5633                 PMD_INIT_LOG(INFO, "VMDQ already configured");
5634                 return 0;
5635         }
5636
5637         pf->vmdq = rte_zmalloc("vmdq_info_struct",
5638                                 sizeof(*vmdq_info) * conf_vsis, 0);
5639
5640         if (pf->vmdq == NULL) {
5641                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
5642                 return -ENOMEM;
5643         }
5644
5645         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
5646
5647         /* Create VMDQ VSI */
5648         for (i = 0; i < conf_vsis; i++) {
5649                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
5650                                 vmdq_conf->enable_loop_back);
5651                 if (vsi == NULL) {
5652                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
5653                         err = -1;
5654                         goto err_vsi_setup;
5655                 }
5656                 vmdq_info = &pf->vmdq[i];
5657                 vmdq_info->pf = pf;
5658                 vmdq_info->vsi = vsi;
5659         }
5660         pf->nb_cfg_vmdq_vsi = conf_vsis;
5661
5662         /* Configure VLAN membership of the pools */
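        /* Each pool_map entry carries a pool bitmap; 'loop' is the bitmap
         * width in bits, i.e. the number of pools it can address.
         */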
5663         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
5664         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
5665                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
5666                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
5667                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
5668                                         vmdq_conf->pool_map[i].vlan_id, j);
5669
5670                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
5671                                                 vmdq_conf->pool_map[i].vlan_id);
5672                                 if (err) {
5673                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
5674                                         err = -1;
5675                                         goto err_vsi_setup;
5676                                 }
5677                         }
5678                 }
5679         }
5680
5681         i40e_pf_enable_irq0(hw);
5682
5683         return 0;
5684
5685 err_vsi_setup:
5686         for (i = 0; i < conf_vsis; i++) {
5687                 if (pf->vmdq[i].vsi == NULL)
5688                         break;
5689                 i40e_vsi_release(pf->vmdq[i].vsi);
5690         }
5691
5692         rte_free(pf->vmdq);
5693         pf->vmdq = NULL;
5694         i40e_pf_enable_irq0(hw);
5695         return err;
5696 }
5697
5698 static void
5699 i40e_stat_update_32(struct i40e_hw *hw,
5700                    uint32_t reg,
5701                    bool offset_loaded,
5702                    uint64_t *offset,
5703                    uint64_t *stat)
5704 {
5705         uint64_t new_data;
5706
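        /* Report the counter relative to the offset captured when stats
         * were first loaded; on 32-bit wraparound, add back 2^32.
         */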
5707         new_data = (uint64_t)I40E_READ_REG(hw, reg);
5708         if (!offset_loaded)
5709                 *offset = new_data;
5710
5711         if (new_data >= *offset)
5712                 *stat = (uint64_t)(new_data - *offset);
5713         else
5714                 *stat = (uint64_t)((new_data +
5715                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
5716 }
5717
5718 static void
5719 i40e_stat_update_48(struct i40e_hw *hw,
5720                    uint32_t hireg,
5721                    uint32_t loreg,
5722                    bool offset_loaded,
5723                    uint64_t *offset,
5724                    uint64_t *stat)
5725 {
5726         uint64_t new_data;
5727
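        /* A 48-bit counter is split across two registers: the low 32 bits
         * in loreg and the high 16 bits in hireg; wrap is handled at 2^48.
         */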
5728         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
5729         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
5730                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
5731
5732         if (!offset_loaded)
5733                 *offset = new_data;
5734
5735         if (new_data >= *offset)
5736                 *stat = new_data - *offset;
5737         else
5738                 *stat = (uint64_t)((new_data +
5739                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
5740
5741         *stat &= I40E_48_BIT_MASK;
5742 }
5743
5744 /* Disable IRQ0 */
5745 void
5746 i40e_pf_disable_irq0(struct i40e_hw *hw)
5747 {
5748         /* Disable all interrupt types */
5749         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
5750         I40E_WRITE_FLUSH(hw);
5751 }
5752
5753 /* Enable IRQ0 */
5754 void
5755 i40e_pf_enable_irq0(struct i40e_hw *hw)
5756 {
5757         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
5758                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
5759                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
5760                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
5761         I40E_WRITE_FLUSH(hw);
5762 }
5763
5764 static void
5765 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
5766 {
5767         /* read pending request and disable first */
5768         i40e_pf_disable_irq0(hw);
5769         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
5770         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
5771                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
5772
5773         if (no_queue)
5774                 /* Link no queues with irq0 */
5775                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
5776                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
5777 }
5778
5779 static void
5780 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
5781 {
5782         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5783         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5784         int i;
5785         uint16_t abs_vf_id;
5786         uint32_t index, offset, val;
5787
5788         if (!pf->vfs)
5789                 return;
5790         /**
5791          * Try to find which VF triggered a reset; use the absolute VF id
5792          * to access it, since the register is a global one.
5793          */
5794         for (i = 0; i < pf->vf_num; i++) {
5795                 abs_vf_id = hw->func_caps.vf_base_id + i;
5796                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
5797                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
5798                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
5799                 /* VFR event occurred */
5800                 if (val & (0x1 << offset)) {
5801                         int ret;
5802
5803                         /* Clear the event first */
5804                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
5805                                                         (0x1 << offset));
5806                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
5807                         /**
5808                          * Only notify that a VF reset event occurred;
5809                          * don't trigger another SW reset.
5810                          */
5811                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
5812                         if (ret != I40E_SUCCESS)
5813                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
5814                 }
5815         }
5816 }
5817
5818 static void
5819 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
5820 {
5821         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5822         int i;
5823
5824         for (i = 0; i < pf->vf_num; i++)
5825                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
5826 }
5827
5828 static void
5829 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
5830 {
5831         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5832         struct i40e_arq_event_info info;
5833         uint16_t pending, opcode;
5834         int ret;
5835
5836         info.buf_len = I40E_AQ_BUF_SZ;
5837         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
5838         if (!info.msg_buf) {
5839                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
5840                 return;
5841         }
5842
5843         pending = 1;
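        /* Drain the admin receive queue; i40e_clean_arq_element() updates
         * 'pending' with the number of events still queued.
         */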
5844         while (pending) {
5845                 ret = i40e_clean_arq_element(hw, &info, &pending);
5846
5847                 if (ret != I40E_SUCCESS) {
5848                         PMD_DRV_LOG(INFO,
5849                                 "Failed to read msg from AdminQ, aq_err: %u",
5850                                 hw->aq.asq_last_status);
5851                         break;
5852                 }
5853                 opcode = rte_le_to_cpu_16(info.desc.opcode);
5854
5855                 switch (opcode) {
5856                 case i40e_aqc_opc_send_msg_to_pf:
5857                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
5858                         i40e_pf_host_handle_vf_msg(dev,
5859                                         rte_le_to_cpu_16(info.desc.retval),
5860                                         rte_le_to_cpu_32(info.desc.cookie_high),
5861                                         rte_le_to_cpu_32(info.desc.cookie_low),
5862                                         info.msg_buf,
5863                                         info.msg_len);
5864                         break;
5865                 case i40e_aqc_opc_get_link_status:
5866                         ret = i40e_dev_link_update(dev, 0);
5867                         if (!ret)
5868                                 _rte_eth_dev_callback_process(dev,
5869                                         RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
5870                         break;
5871                 default:
5872                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
5873                                     opcode);
5874                         break;
5875                 }
5876         }
5877         rte_free(info.msg_buf);
5878 }
5879
5880 /**
5881  * Interrupt handler triggered by the NIC to handle a
5882  * specific interrupt.
5883  *
5884  * @param handle
5885  *  Pointer to interrupt handle.
5886  * @param param
5887  *  The address of the parameter (struct rte_eth_dev *) registered before.
5888  *
5889  * @return
5890  *  void
5891  */
5892 static void
5893 i40e_dev_interrupt_handler(void *param)
5894 {
5895         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
5896         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5897         uint32_t icr0;
5898
5899         /* Disable interrupt */
5900         i40e_pf_disable_irq0(hw);
5901
5902         /* read out interrupt causes */
5903         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
5904
5905         /* No interrupt event indicated */
5906         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
5907                 PMD_DRV_LOG(INFO, "No interrupt event");
5908                 goto done;
5909         }
5910         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
5911                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
5912         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
5913                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
5914         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
5915                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
5916         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
5917                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
5918         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
5919                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
5920         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
5921                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
5922         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
5923                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
5924
5925         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
5926                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
5927                 i40e_dev_handle_vfr_event(dev);
5928         }
5929         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
5930                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
5931                 i40e_dev_handle_aq_msg(dev);
5932         }
5933
5934 done:
5935         /* Enable interrupt */
5936         i40e_pf_enable_irq0(hw);
5937         rte_intr_enable(dev->intr_handle);
5938 }
5939
5940 int
5941 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
5942                          struct i40e_macvlan_filter *filter,
5943                          int total)
5944 {
5945         int ele_num, ele_buff_size;
5946         int num, actual_num, i;
5947         uint16_t flags;
5948         int ret = I40E_SUCCESS;
5949         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5950         struct i40e_aqc_add_macvlan_element_data *req_list;
5951
5952         if (filter == NULL || total == 0)
5953                 return I40E_ERR_PARAM;
5954         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5955         ele_buff_size = hw->aq.asq_buf_size;
5956
5957         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
5958         if (req_list == NULL) {
5959                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
5960                 return I40E_ERR_NO_MEMORY;
5961         }
5962
5963         num = 0;
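        /* Add the filters in batches: one AQ request carries at most
         * ele_num elements, bounded by the admin send queue buffer size.
         */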
5964         do {
5965                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5966                 memset(req_list, 0, ele_buff_size);
5967
5968                 for (i = 0; i < actual_num; i++) {
5969                         (void)rte_memcpy(req_list[i].mac_addr,
5970                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
5971                         req_list[i].vlan_tag =
5972                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
5973
5974                         switch (filter[num + i].filter_type) {
5975                         case RTE_MAC_PERFECT_MATCH:
5976                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
5977                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5978                                 break;
5979                         case RTE_MACVLAN_PERFECT_MATCH:
5980                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
5981                                 break;
5982                         case RTE_MAC_HASH_MATCH:
5983                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
5984                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5985                                 break;
5986                         case RTE_MACVLAN_HASH_MATCH:
5987                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
5988                                 break;
5989                         default:
5990                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
5991                                 ret = I40E_ERR_PARAM;
5992                                 goto DONE;
5993                         }
5994
5995                         req_list[i].queue_number = 0;
5996
5997                         req_list[i].flags = rte_cpu_to_le_16(flags);
5998                 }
5999
6000                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6001                                                 actual_num, NULL);
6002                 if (ret != I40E_SUCCESS) {
6003                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6004                         goto DONE;
6005                 }
6006                 num += actual_num;
6007         } while (num < total);
6008
6009 DONE:
6010         rte_free(req_list);
6011         return ret;
6012 }
6013
6014 int
6015 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6016                             struct i40e_macvlan_filter *filter,
6017                             int total)
6018 {
6019         int ele_num, ele_buff_size;
6020         int num, actual_num, i;
6021         uint16_t flags;
6022         int ret = I40E_SUCCESS;
6023         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6024         struct i40e_aqc_remove_macvlan_element_data *req_list;
6025
6026         if (filter == NULL || total == 0)
6027                 return I40E_ERR_PARAM;
6028
6029         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6030         ele_buff_size = hw->aq.asq_buf_size;
6031
6032         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6033         if (req_list == NULL) {
6034                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6035                 return I40E_ERR_NO_MEMORY;
6036         }
6037
6038         num = 0;
6039         do {
6040                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6041                 memset(req_list, 0, ele_buff_size);
6042
6043                 for (i = 0; i < actual_num; i++) {
6044                         (void)rte_memcpy(req_list[i].mac_addr,
6045                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6046                         req_list[i].vlan_tag =
6047                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6048
6049                         switch (filter[num + i].filter_type) {
6050                         case RTE_MAC_PERFECT_MATCH:
6051                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6052                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6053                                 break;
6054                         case RTE_MACVLAN_PERFECT_MATCH:
6055                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6056                                 break;
6057                         case RTE_MAC_HASH_MATCH:
6058                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6059                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6060                                 break;
6061                         case RTE_MACVLAN_HASH_MATCH:
6062                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6063                                 break;
6064                         default:
6065                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6066                                 ret = I40E_ERR_PARAM;
6067                                 goto DONE;
6068                         }
6069                         req_list[i].flags = rte_cpu_to_le_16(flags);
6070                 }
6071
6072                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6073                                                 actual_num, NULL);
6074                 if (ret != I40E_SUCCESS) {
6075                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6076                         goto DONE;
6077                 }
6078                 num += actual_num;
6079         } while (num < total);
6080
6081 DONE:
6082         rte_free(req_list);
6083         return ret;
6084 }
6085
6086 /* Find a specific MAC filter */
6087 static struct i40e_mac_filter *
6088 i40e_find_mac_filter(struct i40e_vsi *vsi,
6089                          struct ether_addr *macaddr)
6090 {
6091         struct i40e_mac_filter *f;
6092
6093         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6094                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6095                         return f;
6096         }
6097
6098         return NULL;
6099 }
6100
6101 static bool
6102 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6103                          uint16_t vlan_id)
6104 {
6105         uint32_t vid_idx, vid_bit;
6106
6107         if (vlan_id > ETH_VLAN_ID_MAX)
6108                 return 0;
6109
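        /* The VFTA is a bitmap over all vlan ids: I40E_VFTA_IDX selects
         * the 32-bit word and I40E_VFTA_BIT the bit within that word.
         */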
6110         vid_idx = I40E_VFTA_IDX(vlan_id);
6111         vid_bit = I40E_VFTA_BIT(vlan_id);
6112
6113         if (vsi->vfta[vid_idx] & vid_bit)
6114                 return 1;
6115         else
6116                 return 0;
6117 }
6118
6119 static void
6120 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6121                        uint16_t vlan_id, bool on)
6122 {
6123         uint32_t vid_idx, vid_bit;
6124
6125         vid_idx = I40E_VFTA_IDX(vlan_id);
6126         vid_bit = I40E_VFTA_BIT(vlan_id);
6127
6128         if (on)
6129                 vsi->vfta[vid_idx] |= vid_bit;
6130         else
6131                 vsi->vfta[vid_idx] &= ~vid_bit;
6132 }
6133
6134 void
6135 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6136                      uint16_t vlan_id, bool on)
6137 {
6138         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6139         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6140         int ret;
6141
6142         if (vlan_id > ETH_VLAN_ID_MAX)
6143                 return;
6144
6145         i40e_store_vlan_filter(vsi, vlan_id, on);
6146
6147         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6148                 return;
6149
6150         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6151
6152         if (on) {
6153                 ret = i40e_aq_add_vlan(hw, vsi->seid,
6154                                        &vlan_data, 1, NULL);
6155                 if (ret != I40E_SUCCESS)
6156                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6157         } else {
6158                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
6159                                           &vlan_data, 1, NULL);
6160                 if (ret != I40E_SUCCESS)
6161                         PMD_DRV_LOG(ERR,
6162                                     "Failed to remove vlan filter");
6163         }
6164 }
6165
6166 /**
6167  * Find all vlan options for specific mac addr,
6168  * return with actual vlan found.
6169  */
6170 int
6171 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6172                            struct i40e_macvlan_filter *mv_f,
6173                            int num, struct ether_addr *addr)
6174 {
6175         int i;
6176         uint32_t j, k;
6177
6178         /**
6179          * i40e_find_vlan_filter() is deliberately not used here, to keep
6180          * the loop time down, even though the code looks more complex.
6181          */
6182         if (num < vsi->vlan_num)
6183                 return I40E_ERR_PARAM;
6184
6185         i = 0;
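        /* Scan the VFTA bitmap directly; the vlan id of a set bit is
         * the word index * 32 plus the bit index.
         */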
6186         for (j = 0; j < I40E_VFTA_SIZE; j++) {
6187                 if (vsi->vfta[j]) {
6188                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6189                                 if (vsi->vfta[j] & (1 << k)) {
6190                                         if (i > num - 1) {
6191                                                 PMD_DRV_LOG(ERR,
6192                                                         "vlan number doesn't match");
6193                                                 return I40E_ERR_PARAM;
6194                                         }
6195                                         (void)rte_memcpy(&mv_f[i].macaddr,
6196                                                         addr, ETH_ADDR_LEN);
6197                                         mv_f[i].vlan_id =
6198                                                 j * I40E_UINT32_BIT_SIZE + k;
6199                                         i++;
6200                                 }
6201                         }
6202                 }
6203         }
6204         return I40E_SUCCESS;
6205 }
6206
6207 static inline int
6208 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
6209                            struct i40e_macvlan_filter *mv_f,
6210                            int num,
6211                            uint16_t vlan)
6212 {
6213         int i = 0;
6214         struct i40e_mac_filter *f;
6215
6216         if (num < vsi->mac_num)
6217                 return I40E_ERR_PARAM;
6218
6219         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6220                 if (i > num - 1) {
6221                         PMD_DRV_LOG(ERR, "buffer number doesn't match");
6222                         return I40E_ERR_PARAM;
6223                 }
6224                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6225                                 ETH_ADDR_LEN);
6226                 mv_f[i].vlan_id = vlan;
6227                 mv_f[i].filter_type = f->mac_info.filter_type;
6228                 i++;
6229         }
6230
6231         return I40E_SUCCESS;
6232 }
6233
6234 static int
6235 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
6236 {
6237         int i, j, num;
6238         struct i40e_mac_filter *f;
6239         struct i40e_macvlan_filter *mv_f;
6240         int ret = I40E_SUCCESS;
6241
6242         if (vsi == NULL || vsi->mac_num == 0)
6243                 return I40E_ERR_PARAM;
6244
6245         /* Case that no vlan is set */
6246         if (vsi->vlan_num == 0)
6247                 num = vsi->mac_num;
6248         else
6249                 num = vsi->mac_num * vsi->vlan_num;
6250
6251         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
6252         if (mv_f == NULL) {
6253                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6254                 return I40E_ERR_NO_MEMORY;
6255         }
6256
6257         i = 0;
6258         if (vsi->vlan_num == 0) {
6259                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6260                         (void)rte_memcpy(&mv_f[i].macaddr,
6261                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
6262                         mv_f[i].filter_type = f->mac_info.filter_type;
6263                         mv_f[i].vlan_id = 0;
6264                         i++;
6265                 }
6266         } else {
6267                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6268                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
6269                                         vsi->vlan_num, &f->mac_info.mac_addr);
6270                         if (ret != I40E_SUCCESS)
6271                                 goto DONE;
6272                         for (j = i; j < i + vsi->vlan_num; j++)
6273                                 mv_f[j].filter_type = f->mac_info.filter_type;
6274                         i += vsi->vlan_num;
6275                 }
6276         }
6277
6278         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
6279 DONE:
6280         rte_free(mv_f);
6281
6282         return ret;
6283 }
6284
6285 int
6286 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6287 {
6288         struct i40e_macvlan_filter *mv_f;
6289         int mac_num;
6290         int ret = I40E_SUCCESS;
6291
6292         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
6293                 return I40E_ERR_PARAM;
6294
6295         /* If it's already set, just return */
6296         if (i40e_find_vlan_filter(vsi, vlan))
6297                 return I40E_SUCCESS;
6298
6299         mac_num = vsi->mac_num;
6300
6301         if (mac_num == 0) {
6302                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6303                 return I40E_ERR_PARAM;
6304         }
6305
6306         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6307
6308         if (mv_f == NULL) {
6309                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6310                 return I40E_ERR_NO_MEMORY;
6311         }
6312
6313         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6314
6315         if (ret != I40E_SUCCESS)
6316                 goto DONE;
6317
6318         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6319
6320         if (ret != I40E_SUCCESS)
6321                 goto DONE;
6322
6323         i40e_set_vlan_filter(vsi, vlan, 1);
6324
6325         vsi->vlan_num++;
6326         ret = I40E_SUCCESS;
6327 DONE:
6328         rte_free(mv_f);
6329         return ret;
6330 }
6331
6332 int
6333 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6334 {
6335         struct i40e_macvlan_filter *mv_f;
6336         int mac_num;
6337         int ret = I40E_SUCCESS;
6338
6339         /**
6340          * Vlan 0 is the generic filter for untagged packets
6341          * and can't be removed.
6342          */
6343         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
6344                 return I40E_ERR_PARAM;
6345
6346         /* If it can't be found, just return */
6347         if (!i40e_find_vlan_filter(vsi, vlan))
6348                 return I40E_ERR_PARAM;
6349
6350         mac_num = vsi->mac_num;
6351
6352         if (mac_num == 0) {
6353                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6354                 return I40E_ERR_PARAM;
6355         }
6356
6357         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6358
6359         if (mv_f == NULL) {
6360                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6361                 return I40E_ERR_NO_MEMORY;
6362         }
6363
6364         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6365
6366         if (ret != I40E_SUCCESS)
6367                 goto DONE;
6368
6369         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6370
6371         if (ret != I40E_SUCCESS)
6372                 goto DONE;
6373
6374         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
6375         if (vsi->vlan_num == 1) {
6376                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6377                 if (ret != I40E_SUCCESS)
6378                         goto DONE;
6379
6380                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6381                 if (ret != I40E_SUCCESS)
6382                         goto DONE;
6383         }
6384
6385         i40e_set_vlan_filter(vsi, vlan, 0);
6386
6387         vsi->vlan_num--;
6388         ret = I40E_SUCCESS;
6389 DONE:
6390         rte_free(mv_f);
6391         return ret;
6392 }
6393
6394 int
6395 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6396 {
6397         struct i40e_mac_filter *f;
6398         struct i40e_macvlan_filter *mv_f;
6399         int i, vlan_num = 0;
6400         int ret = I40E_SUCCESS;
6401
6402         /* If it's an add and we've already configured it, return */
6403         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6404         if (f != NULL)
6405                 return I40E_SUCCESS;
6406         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6407                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6408
6409                 /**
6410                  * If vlan_num is 0, this is the first time to add a mac;
6411                  * set the mask for vlan_id 0.
6412                  */
6413                 if (vsi->vlan_num == 0) {
6414                         i40e_set_vlan_filter(vsi, 0, 1);
6415                         vsi->vlan_num = 1;
6416                 }
6417                 vlan_num = vsi->vlan_num;
6418         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6419                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6420                 vlan_num = 1;
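        /* MAC-only filter types ignore the vlan tag (they are added with
         * the IGNORE_VLAN flag), so a single macvlan entry suffices.
         */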
6421
6422         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6423         if (mv_f == NULL) {
6424                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6425                 return I40E_ERR_NO_MEMORY;
6426         }
6427
6428         for (i = 0; i < vlan_num; i++) {
6429                 mv_f[i].filter_type = mac_filter->filter_type;
6430                 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6431                                 ETH_ADDR_LEN);
6432         }
6433
6434         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6435                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6436                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6437                                         &mac_filter->mac_addr);
6438                 if (ret != I40E_SUCCESS)
6439                         goto DONE;
6440         }
6441
6442         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6443         if (ret != I40E_SUCCESS)
6444                 goto DONE;
6445
6446         /* Add the mac addr to the mac list */
6447         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6448         if (f == NULL) {
6449                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6450                 ret = I40E_ERR_NO_MEMORY;
6451                 goto DONE;
6452         }
6453         (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6454                         ETH_ADDR_LEN);
6455         f->mac_info.filter_type = mac_filter->filter_type;
6456         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
6457         vsi->mac_num++;
6458
6459         ret = I40E_SUCCESS;
6460 DONE:
6461         rte_free(mv_f);
6462
6463         return ret;
6464 }
6465
6466 int
6467 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
6468 {
6469         struct i40e_mac_filter *f;
6470         struct i40e_macvlan_filter *mv_f;
6471         int i, vlan_num;
6472         enum rte_mac_filter_type filter_type;
6473         int ret = I40E_SUCCESS;
6474
6475         /* If it can't be found, return an error */
6476         f = i40e_find_mac_filter(vsi, addr);
6477         if (f == NULL)
6478                 return I40E_ERR_PARAM;
6479
6480         vlan_num = vsi->vlan_num;
6481         filter_type = f->mac_info.filter_type;
6482         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6483                 filter_type == RTE_MACVLAN_HASH_MATCH) {
6484                 if (vlan_num == 0) {
6485                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
6486                         return I40E_ERR_PARAM;
6487                 }
6488         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
6489                         filter_type == RTE_MAC_HASH_MATCH)
6490                 vlan_num = 1;
6491
6492         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6493         if (mv_f == NULL) {
6494                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6495                 return I40E_ERR_NO_MEMORY;
6496         }
6497
6498         for (i = 0; i < vlan_num; i++) {
6499                 mv_f[i].filter_type = filter_type;
6500                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6501                                 ETH_ADDR_LEN);
6502         }
6503         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6504                         filter_type == RTE_MACVLAN_HASH_MATCH) {
6505                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
6506                 if (ret != I40E_SUCCESS)
6507                         goto DONE;
6508         }
6509
6510         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
6511         if (ret != I40E_SUCCESS)
6512                 goto DONE;
6513
6514         /* Remove the mac addr from the mac list */
6515         TAILQ_REMOVE(&vsi->mac_list, f, next);
6516         rte_free(f);
6517         vsi->mac_num--;
6518
6519         ret = I40E_SUCCESS;
6520 DONE:
6521         rte_free(mv_f);
6522         return ret;
6523 }
6524
6525 /* Configure hash enable flags for RSS */
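/* Note: X722 devices define extra packet classifier types for the same
 * RSS flow type, so several HENA bits are set for that MAC type.
 */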
6526 uint64_t
6527 i40e_config_hena(uint64_t flags, enum i40e_mac_type type)
6528 {
6529         uint64_t hena = 0;
6530
6531         if (!flags)
6532                 return hena;
6533
6534         if (flags & ETH_RSS_FRAG_IPV4)
6535                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
6536         if (flags & ETH_RSS_NONFRAG_IPV4_TCP) {
6537                 if (type == I40E_MAC_X722) {
6538                         hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
6539                          (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
6540                 } else
6541                         hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
6542         }
6543         if (flags & ETH_RSS_NONFRAG_IPV4_UDP) {
6544                 if (type == I40E_MAC_X722) {
6545                         hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
6546                          (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
6547                          (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
6548                 } else
6549                         hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
6550         }
6551         if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
6552                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
6553         if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
6554                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
6555         if (flags & ETH_RSS_FRAG_IPV6)
6556                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
6557         if (flags & ETH_RSS_NONFRAG_IPV6_TCP) {
6558                 if (type == I40E_MAC_X722) {
6559                         hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
6560                          (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
6561                 } else
6562                         hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
6563         }
6564         if (flags & ETH_RSS_NONFRAG_IPV6_UDP) {
6565                 if (type == I40E_MAC_X722) {
6566                         hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
6567                          (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
6568                          (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
6569                 } else
6570                         hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
6571         }
6572         if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
6573                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
6574         if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
6575                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
6576         if (flags & ETH_RSS_L2_PAYLOAD)
6577                 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
6578
6579         return hena;
6580 }
6581
6582 /* Parse the hash enable flags */
6583 uint64_t
6584 i40e_parse_hena(uint64_t flags)
6585 {
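        /*
         * Inverse of i40e_config_hena(): collapse the per-PCTYPE enable bits,
         * including the X722-specific variants, back into generic ETH_RSS_*
         * flags.
         */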
6586         uint64_t rss_hf = 0;
6587
6588         if (!flags)
6589                 return rss_hf;
6590         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
6591                 rss_hf |= ETH_RSS_FRAG_IPV4;
6592         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
6593                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
6594         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK))
6595                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
6596         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
6597                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
6598         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP))
6599                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
6600         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP))
6601                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
6602         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
6603                 rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
6604         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
6605                 rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
6606         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
6607                 rss_hf |= ETH_RSS_FRAG_IPV6;
6608         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
6609                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
6610         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
6611                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
6612         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
6613                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
6614         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP))
6615                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
6616         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
6617                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
6618         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
6619                 rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
6620         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
6621                 rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
6622         if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
6623                 rss_hf |= ETH_RSS_L2_PAYLOAD;
6624
6625         return rss_hf;
6626 }
6627
6628 /* Disable RSS */
6629 static void
6630 i40e_pf_disable_rss(struct i40e_pf *pf)
6631 {
6632         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6633         uint64_t hena;
6634
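        /* The 64-bit hash-enable value is split across two 32-bit registers:
         * PFQF_HENA(0) holds the low word and PFQF_HENA(1) the high word.
         */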
6635         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6636         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6637         if (hw->mac.type == I40E_MAC_X722)
6638                 hena &= ~I40E_RSS_HENA_ALL_X722;
6639         else
6640                 hena &= ~I40E_RSS_HENA_ALL;
6641         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
6642         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
6643         I40E_WRITE_FLUSH(hw);
6644 }
6645
6646 static int
6647 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
6648 {
6649         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6650         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6651         int ret = 0;
6652
6653         if (!key || key_len == 0) {
6654                 PMD_DRV_LOG(DEBUG, "No key to be configured");
6655                 return 0;
6656         } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
6657                 sizeof(uint32_t)) {
6658                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
6659                 return -EINVAL;
6660         }
6661
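        /* When the PF reports I40E_FLAG_RSS_AQ_CAPABLE, the key is set via an
         * admin queue command; otherwise the PFQF_HKEY registers are written
         * directly.
         */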
6662         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6663                 struct i40e_aqc_get_set_rss_key_data *key_dw =
6664                         (struct i40e_aqc_get_set_rss_key_data *)key;
6665
6666                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
6667                 if (ret)
6668                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
6669         } else {
6670                 uint32_t *hash_key = (uint32_t *)key;
6671                 uint16_t i;
6672
6673                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6674                         i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]);
6675                 I40E_WRITE_FLUSH(hw);
6676         }
6677
6678         return ret;
6679 }
6680
6681 static int
6682 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
6683 {
6684         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6685         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6686         int ret;
6687
6688         if (!key || !key_len)
6689                 return -EINVAL;
6690
6691         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6692                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
6693                         (struct i40e_aqc_get_set_rss_key_data *)key);
6694                 if (ret) {
6695                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
6696                         return ret;
6697                 }
6698         } else {
6699                 uint32_t *key_dw = (uint32_t *)key;
6700                 uint16_t i;
6701
6702                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6703                         key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
6704         }
6705         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
6706
6707         return 0;
6708 }
6709
6710 static int
6711 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
6712 {
6713         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6714         uint64_t rss_hf;
6715         uint64_t hena;
6716         int ret;
6717
6718         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
6719                                rss_conf->rss_key_len);
6720         if (ret)
6721                 return ret;
6722
6723         rss_hf = rss_conf->rss_hf;
6724         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6725         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6726         if (hw->mac.type == I40E_MAC_X722)
6727                 hena &= ~I40E_RSS_HENA_ALL_X722;
6728         else
6729                 hena &= ~I40E_RSS_HENA_ALL;
6730         hena |= i40e_config_hena(rss_hf, hw->mac.type);
6731         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
6732         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
6733         I40E_WRITE_FLUSH(hw);
6734
6735         return 0;
6736 }
6737
6738 static int
6739 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
6740                          struct rte_eth_rss_conf *rss_conf)
6741 {
6742         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6743         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6744         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
6745         uint64_t hena;
6746
6747         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6748         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6749         if (!(hena & ((hw->mac.type == I40E_MAC_X722)
6750                  ? I40E_RSS_HENA_ALL_X722
6751                  : I40E_RSS_HENA_ALL))) { /* RSS disabled */
6752                 if (rss_hf != 0) /* Enable RSS */
6753                         return -EINVAL;
6754                 return 0; /* Nothing to do */
6755         }
6756         /* RSS enabled */
6757         if (rss_hf == 0) /* Disable RSS */
6758                 return -EINVAL;
6759
6760         return i40e_hw_rss_hash_set(pf, rss_conf);
6761 }
6762
6763 static int
6764 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
6765                            struct rte_eth_rss_conf *rss_conf)
6766 {
6767         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6768         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6769         uint64_t hena;
6770
6771         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
6772                          &rss_conf->rss_key_len);
6773
6774         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6775         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6776         rss_conf->rss_hf = i40e_parse_hena(hena);
6777
6778         return 0;
6779 }
6780
6781 static int
6782 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
6783 {
6784         switch (filter_type) {
6785         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
6786                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
6787                 break;
6788         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
6789                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
6790                 break;
6791         case RTE_TUNNEL_FILTER_IMAC_TENID:
6792                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
6793                 break;
6794         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
6795                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
6796                 break;
6797         case ETH_TUNNEL_FILTER_IMAC:
6798                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
6799                 break;
6800         case ETH_TUNNEL_FILTER_OIP:
6801                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
6802                 break;
6803         case ETH_TUNNEL_FILTER_IIP:
6804                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
6805                 break;
6806         default:
6807                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
6808                 return -EINVAL;
6809         }
6810
6811         return 0;
6812 }
6813
6814 /* Convert tunnel filter structure */
6815 static int
6816 i40e_tunnel_filter_convert(
6817         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
6818         struct i40e_tunnel_filter *tunnel_filter)
6819 {
6820         ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
6821                         (struct ether_addr *)&tunnel_filter->input.outer_mac);
6822         ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
6823                         (struct ether_addr *)&tunnel_filter->input.inner_mac);
6824         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
6825         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
6826              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
6827             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
6828                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
6829         else
6830                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
6831         tunnel_filter->input.flags = cld_filter->element.flags;
6832         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
6833         tunnel_filter->queue = cld_filter->element.queue_number;
6834         rte_memcpy(tunnel_filter->input.general_fields,
6835                    cld_filter->general_fields,
6836                    sizeof(cld_filter->general_fields));
6837
6838         return 0;
6839 }
6840
/* Check if the tunnel filter exists in the SW list */
6842 struct i40e_tunnel_filter *
6843 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
6844                              const struct i40e_tunnel_filter_input *input)
6845 {
6846         int ret;
6847
6848         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
6849         if (ret < 0)
6850                 return NULL;
6851
6852         return tunnel_rule->hash_map[ret];
6853 }
6854
6855 /* Add a tunnel filter into the SW list */
6856 static int
6857 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
6858                              struct i40e_tunnel_filter *tunnel_filter)
6859 {
6860         struct i40e_tunnel_rule *rule = &pf->tunnel;
6861         int ret;
6862
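        /* On success rte_hash_add_key() returns the key's slot index, which
         * doubles as the index into rule->hash_map.
         */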
6863         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
6864         if (ret < 0) {
6865                 PMD_DRV_LOG(ERR,
                            "Failed to insert tunnel filter into hash table %d!",
6867                             ret);
6868                 return ret;
6869         }
6870         rule->hash_map[ret] = tunnel_filter;
6871
6872         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
6873
6874         return 0;
6875 }
6876
6877 /* Delete a tunnel filter from the SW list */
6878 int
6879 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
6880                           struct i40e_tunnel_filter_input *input)
6881 {
6882         struct i40e_tunnel_rule *rule = &pf->tunnel;
6883         struct i40e_tunnel_filter *tunnel_filter;
6884         int ret;
6885
6886         ret = rte_hash_del_key(rule->hash_table, input);
6887         if (ret < 0) {
6888                 PMD_DRV_LOG(ERR,
                            "Failed to delete tunnel filter from hash table %d!",
6890                             ret);
6891                 return ret;
6892         }
6893         tunnel_filter = rule->hash_map[ret];
6894         rule->hash_map[ret] = NULL;
6895
6896         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
6897         rte_free(tunnel_filter);
6898
6899         return 0;
6900 }
6901
6902 int
6903 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
6904                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
6905                         uint8_t add)
6906 {
6907         uint16_t ip_type;
6908         uint32_t ipv4_addr;
6909         uint8_t i, tun_type = 0;
        /* internal variable to convert IPv6 byte order */
6911         uint32_t convert_ipv6[4];
6912         int val, ret = 0;
6913         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6914         struct i40e_vsi *vsi = pf->main_vsi;
6915         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
6916         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
6917         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
6918         struct i40e_tunnel_filter *tunnel, *node;
6919         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
6920
        cld_filter = rte_zmalloc("tunnel_filter",
                         sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
                         0);
6924
        if (cld_filter == NULL) {
6926                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
6927                 return -ENOMEM;
6928         }
6929         pfilter = cld_filter;
6930
6931         ether_addr_copy(&tunnel_filter->outer_mac,
6932                         (struct ether_addr *)&pfilter->element.outer_mac);
6933         ether_addr_copy(&tunnel_filter->inner_mac,
6934                         (struct ether_addr *)&pfilter->element.inner_mac);
6935
6936         pfilter->element.inner_vlan =
6937                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
6938         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
6939                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
6940                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
6941                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
6942                                 &rte_cpu_to_le_32(ipv4_addr),
6943                                 sizeof(pfilter->element.ipaddr.v4.data));
6944         } else {
6945                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
6946                 for (i = 0; i < 4; i++) {
6947                         convert_ipv6[i] =
6948                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
6949                 }
6950                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
6951                            &convert_ipv6,
6952                            sizeof(pfilter->element.ipaddr.v6.data));
6953         }
6954
        /* check the tunnel type */
6956         switch (tunnel_filter->tunnel_type) {
6957         case RTE_TUNNEL_TYPE_VXLAN:
6958                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
6959                 break;
6960         case RTE_TUNNEL_TYPE_NVGRE:
6961                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
6962                 break;
6963         case RTE_TUNNEL_TYPE_IP_IN_GRE:
6964                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
6965                 break;
6966         default:
                /* Other tunnel types are not supported. */
6968                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
6969                 rte_free(cld_filter);
6970                 return -EINVAL;
6971         }
6972
6973         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
6974                                        &pfilter->element.flags);
6975         if (val < 0) {
6976                 rte_free(cld_filter);
6977                 return -EINVAL;
6978         }
6979
6980         pfilter->element.flags |= rte_cpu_to_le_16(
6981                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
6982                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
6983         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
6984         pfilter->element.queue_number =
6985                 rte_cpu_to_le_16(tunnel_filter->queue_id);
6986
        /* Check if the filter already exists in the SW list */
6988         memset(&check_filter, 0, sizeof(check_filter));
6989         i40e_tunnel_filter_convert(cld_filter, &check_filter);
6990         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
        if (add && node) {
                PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
                rte_free(cld_filter);
                return -EINVAL;
        }
6995
        if (!add && !node) {
                PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
                rte_free(cld_filter);
                return -EINVAL;
        }
7000
7001         if (add) {
7002                 ret = i40e_aq_add_cloud_filters(hw,
7003                                         vsi->seid, &cld_filter->element, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
                        rte_free(cld_filter);
                        return -ENOTSUP;
                }
                tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
                if (tunnel == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to alloc memory.");
                        rte_free(cld_filter);
                        return -ENOMEM;
                }
                rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7010                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7011         } else {
7012                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7013                                                    &cld_filter->element, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
                        rte_free(cld_filter);
                        return -ENOTSUP;
                }
7018                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7019         }
7020
7021         rte_free(cld_filter);
7022         return ret;
7023 }
7024
7025 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7026 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7027 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7028 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7029 #define I40E_TR_GRE_KEY_MASK                    0x400
7030 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7031 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7032
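/*
 * The helpers below use the firmware "replace cloud filters" admin queue
 * command to repurpose unused L1 and cloud filter types, so that
 * MPLSoUDP/MPLSoGRE packets can be matched on their label (carried in the
 * filter's TEID field).
 */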
7033 static enum
7034 i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7035 {
7036         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7037         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7038         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7039         enum i40e_status_code status = I40E_SUCCESS;
7040
7041         memset(&filter_replace, 0,
7042                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7043         memset(&filter_replace_buf, 0,
7044                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7045
7046         /* create L1 filter */
7047         filter_replace.old_filter_type =
7048                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7049         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
7050         filter_replace.tr_bit = 0;
7051
7052         /* Prepare the buffer, 3 entries */
7053         filter_replace_buf.data[0] =
7054                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7055         filter_replace_buf.data[0] |=
7056                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7057         filter_replace_buf.data[2] = 0xFF;
7058         filter_replace_buf.data[3] = 0xFF;
7059         filter_replace_buf.data[4] =
7060                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7061         filter_replace_buf.data[4] |=
7062                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7063         filter_replace_buf.data[7] = 0xF0;
7064         filter_replace_buf.data[8]
7065                 = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7066         filter_replace_buf.data[8] |=
7067                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7068         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7069                 I40E_TR_GENEVE_KEY_MASK |
7070                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7071         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7072                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7073                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7074
7075         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7076                                                &filter_replace_buf);
7077         return status;
7078 }
7079
7080 static enum
7081 i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7082 {
7083         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7084         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7085         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7086         enum i40e_status_code status = I40E_SUCCESS;
7087
7088         /* For MPLSoUDP */
7089         memset(&filter_replace, 0,
7090                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7091         memset(&filter_replace_buf, 0,
7092                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7093         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7094                 I40E_AQC_MIRROR_CLOUD_FILTER;
7095         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7096         filter_replace.new_filter_type =
7097                 I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
7098         /* Prepare the buffer, 2 entries */
7099         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7100         filter_replace_buf.data[0] |=
7101                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7102         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
7103         filter_replace_buf.data[4] |=
7104                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7105         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7106                                                &filter_replace_buf);
7107         if (status < 0)
7108                 return status;
7109
7110         /* For MPLSoGRE */
7111         memset(&filter_replace, 0,
7112                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7113         memset(&filter_replace_buf, 0,
7114                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7115
7116         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7117                 I40E_AQC_MIRROR_CLOUD_FILTER;
7118         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7119         filter_replace.new_filter_type =
7120                 I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
7121         /* Prepare the buffer, 2 entries */
7122         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7123         filter_replace_buf.data[0] |=
7124                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7125         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
7126         filter_replace_buf.data[4] |=
7127                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7128
7129         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7130                                                &filter_replace_buf);
7131         return status;
7132 }
7133
7134 int
7135 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
7136                       struct i40e_tunnel_filter_conf *tunnel_filter,
7137                       uint8_t add)
7138 {
7139         uint16_t ip_type;
7140         uint32_t ipv4_addr;
7141         uint8_t i, tun_type = 0;
        /* internal variable to convert IPv6 byte order */
7143         uint32_t convert_ipv6[4];
7144         int val, ret = 0;
7145         struct i40e_pf_vf *vf = NULL;
7146         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7147         struct i40e_vsi *vsi;
7148         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7149         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7150         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7151         struct i40e_tunnel_filter *tunnel, *node;
7152         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7153         uint32_t teid_le;
7154         bool big_buffer = 0;
7155
7156         cld_filter = rte_zmalloc("tunnel_filter",
7157                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7158                          0);
7159
7160         if (cld_filter == NULL) {
7161                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7162                 return -ENOMEM;
7163         }
7164         pfilter = cld_filter;
7165
7166         ether_addr_copy(&tunnel_filter->outer_mac,
7167                         (struct ether_addr *)&pfilter->element.outer_mac);
7168         ether_addr_copy(&tunnel_filter->inner_mac,
7169                         (struct ether_addr *)&pfilter->element.inner_mac);
7170
7171         pfilter->element.inner_vlan =
7172                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7173         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
7174                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7175                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7176                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7177                                 &rte_cpu_to_le_32(ipv4_addr),
7178                                 sizeof(pfilter->element.ipaddr.v4.data));
7179         } else {
7180                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7181                 for (i = 0; i < 4; i++) {
7182                         convert_ipv6[i] =
7183                         rte_cpu_to_le_32(rte_be_to_cpu_32(
7184                                          tunnel_filter->ip_addr.ipv6_addr[i]));
7185                 }
7186                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7187                            &convert_ipv6,
7188                            sizeof(pfilter->element.ipaddr.v6.data));
7189         }
7190
        /* check the tunnel type */
7192         switch (tunnel_filter->tunnel_type) {
7193         case I40E_TUNNEL_TYPE_VXLAN:
7194                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7195                 break;
7196         case I40E_TUNNEL_TYPE_NVGRE:
7197                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7198                 break;
7199         case I40E_TUNNEL_TYPE_IP_IN_GRE:
7200                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7201                 break;
7202         case I40E_TUNNEL_TYPE_MPLSoUDP:
7203                 if (!pf->mpls_replace_flag) {
7204                         i40e_replace_mpls_l1_filter(pf);
7205                         i40e_replace_mpls_cloud_filter(pf);
7206                         pf->mpls_replace_flag = 1;
7207                 }
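                /* Split the label across two 16-bit flexible words:
                 * bits 19:4 go into word 0 and the low nibble into the
                 * top of word 1.
                 */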
7208                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7209                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7210                         teid_le >> 4;
7211                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7212                         (teid_le & 0xF) << 12;
7213                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7214                         0x40;
7215                 big_buffer = 1;
7216                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP;
7217                 break;
7218         case I40E_TUNNEL_TYPE_MPLSoGRE:
7219                 if (!pf->mpls_replace_flag) {
7220                         i40e_replace_mpls_l1_filter(pf);
7221                         i40e_replace_mpls_cloud_filter(pf);
7222                         pf->mpls_replace_flag = 1;
7223                 }
7224                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7225                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7226                         teid_le >> 4;
7227                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7228                         (teid_le & 0xF) << 12;
7229                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7230                         0x0;
7231                 big_buffer = 1;
7232                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
7233                 break;
7234         case I40E_TUNNEL_TYPE_QINQ:
7235                 if (!pf->qinq_replace_flag) {
7236                         ret = i40e_cloud_filter_qinq_create(pf);
7237                         if (ret < 0)
7238                                 PMD_DRV_LOG(DEBUG,
7239                                             "QinQ tunnel filter already created.");
7240                         pf->qinq_replace_flag = 1;
7241                 }
                /* Put the values of the outer and inner VLAN into the
                 * general fields. The big buffer path must be used; see
                 * the changes in i40e_aq_add_cloud_filters.
                 */
7247                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
7248                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
7249                 big_buffer = 1;
7250                 break;
7251         default:
                /* Other tunnel types are not supported. */
7253                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7254                 rte_free(cld_filter);
7255                 return -EINVAL;
7256         }
7257
7258         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
7259                 pfilter->element.flags =
7260                         I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
7261         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
7262                 pfilter->element.flags =
7263                         I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
7264         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
7265                 pfilter->element.flags |=
7266                         I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
7267         else {
7268                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7269                                                 &pfilter->element.flags);
7270                 if (val < 0) {
7271                         rte_free(cld_filter);
7272                         return -EINVAL;
7273                 }
7274         }
7275
7276         pfilter->element.flags |= rte_cpu_to_le_16(
7277                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7278                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7279         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7280         pfilter->element.queue_number =
7281                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7282
        if (!tunnel_filter->is_to_vf) {
                vsi = pf->main_vsi;
        } else {
                if (tunnel_filter->vf_id >= pf->vf_num) {
                        PMD_DRV_LOG(ERR, "Invalid argument.");
                        rte_free(cld_filter);
                        return -EINVAL;
                }
                vf = &pf->vfs[tunnel_filter->vf_id];
                vsi = vf->vsi;
        }
7293
        /* Check if the filter already exists in the SW list */
7295         memset(&check_filter, 0, sizeof(check_filter));
7296         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7297         check_filter.is_to_vf = tunnel_filter->is_to_vf;
7298         check_filter.vf_id = tunnel_filter->vf_id;
7299         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
        if (add && node) {
                PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
                rte_free(cld_filter);
                return -EINVAL;
        }
7304
        if (!add && !node) {
                PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
                rte_free(cld_filter);
                return -EINVAL;
        }
7309
7310         if (add) {
7311                 if (big_buffer)
7312                         ret = i40e_aq_add_cloud_filters_big_buffer(hw,
7313                                                    vsi->seid, cld_filter, 1);
7314                 else
7315                         ret = i40e_aq_add_cloud_filters(hw,
7316                                         vsi->seid, &cld_filter->element, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
                        rte_free(cld_filter);
                        return -ENOTSUP;
                }
                tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
                if (tunnel == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to alloc memory.");
                        rte_free(cld_filter);
                        return -ENOMEM;
                }
                rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7323                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7324         } else {
7325                 if (big_buffer)
7326                         ret = i40e_aq_remove_cloud_filters_big_buffer(
7327                                 hw, vsi->seid, cld_filter, 1);
7328                 else
7329                         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7330                                                    &cld_filter->element, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
                        rte_free(cld_filter);
                        return -ENOTSUP;
                }
7335                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7336         }
7337
7338         rte_free(cld_filter);
7339         return ret;
7340 }
7341
7342 static int
7343 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
7344 {
7345         uint8_t i;
7346
7347         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7348                 if (pf->vxlan_ports[i] == port)
7349                         return i;
7350         }
7351
7352         return -1;
7353 }
7354
7355 static int
7356 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
7357 {
7358         int  idx, ret;
7359         uint8_t filter_idx;
7360         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7361
7362         idx = i40e_get_vxlan_port_idx(pf, port);
7363
7364         /* Check if port already exists */
7365         if (idx >= 0) {
7366                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
7367                 return -EINVAL;
7368         }
7369
7370         /* Now check if there is space to add the new port */
7371         idx = i40e_get_vxlan_port_idx(pf, 0);
7372         if (idx < 0) {
7373                 PMD_DRV_LOG(ERR,
7374                         "Maximum number of UDP ports reached, not adding port %d",
7375                         port);
7376                 return -ENOSPC;
7377         }
7378
7379         ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
7380                                         &filter_idx, NULL);
7381         if (ret < 0) {
7382                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
7383                 return -1;
7384         }
7385
        PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
                        port, filter_idx);
7388
7389         /* New port: add it and mark its index in the bitmap */
7390         pf->vxlan_ports[idx] = port;
7391         pf->vxlan_bitmap |= (1 << idx);
7392
7393         if (!(pf->flags & I40E_FLAG_VXLAN))
7394                 pf->flags |= I40E_FLAG_VXLAN;
7395
7396         return 0;
7397 }
7398
7399 static int
7400 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
7401 {
7402         int idx;
7403         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7404
7405         if (!(pf->flags & I40E_FLAG_VXLAN)) {
7406                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
7407                 return -EINVAL;
7408         }
7409
7410         idx = i40e_get_vxlan_port_idx(pf, port);
7411
7412         if (idx < 0) {
7413                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
7414                 return -EINVAL;
7415         }
7416
7417         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
7418                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
7419                 return -1;
7420         }
7421
        PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
                        port, idx);
7424
7425         pf->vxlan_ports[idx] = 0;
7426         pf->vxlan_bitmap &= ~(1 << idx);
7427
7428         if (!pf->vxlan_bitmap)
7429                 pf->flags &= ~I40E_FLAG_VXLAN;
7430
7431         return 0;
7432 }
7433
7434 /* Add UDP tunneling port */
7435 static int
7436 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
7437                              struct rte_eth_udp_tunnel *udp_tunnel)
7438 {
7439         int ret = 0;
7440         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7441
7442         if (udp_tunnel == NULL)
7443                 return -EINVAL;
7444
7445         switch (udp_tunnel->prot_type) {
7446         case RTE_TUNNEL_TYPE_VXLAN:
7447                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
7448                 break;
7449
7450         case RTE_TUNNEL_TYPE_GENEVE:
7451         case RTE_TUNNEL_TYPE_TEREDO:
7452                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
7453                 ret = -1;
7454                 break;
7455
7456         default:
7457                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7458                 ret = -1;
7459                 break;
7460         }
7461
7462         return ret;
7463 }
7464
7465 /* Remove UDP tunneling port */
7466 static int
7467 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
7468                              struct rte_eth_udp_tunnel *udp_tunnel)
7469 {
7470         int ret = 0;
7471         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7472
7473         if (udp_tunnel == NULL)
7474                 return -EINVAL;
7475
7476         switch (udp_tunnel->prot_type) {
7477         case RTE_TUNNEL_TYPE_VXLAN:
7478                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
7479                 break;
7480         case RTE_TUNNEL_TYPE_GENEVE:
7481         case RTE_TUNNEL_TYPE_TEREDO:
7482                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
7483                 ret = -1;
7484                 break;
7485         default:
7486                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7487                 ret = -1;
7488                 break;
7489         }
7490
7491         return ret;
7492 }
7493
7494 /* Calculate the maximum number of contiguous PF queues that are configured */
7495 static int
7496 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
7497 {
7498         struct rte_eth_dev_data *data = pf->dev_data;
7499         int i, num;
7500         struct i40e_rx_queue *rxq;
7501
7502         num = 0;
7503         for (i = 0; i < pf->lan_nb_qps; i++) {
7504                 rxq = data->rx_queues[i];
7505                 if (rxq && rxq->q_set)
7506                         num++;
7507                 else
7508                         break;
7509         }
7510
7511         return num;
7512 }
7513
7514 /* Configure RSS */
7515 static int
7516 i40e_pf_config_rss(struct i40e_pf *pf)
7517 {
7518         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7519         struct rte_eth_rss_conf rss_conf;
7520         uint32_t i, lut = 0;
7521         uint16_t j, num;
7522
        /*
         * If both VMDQ and RSS are enabled, not all PF queues are configured.
         * It's necessary to calculate the actual number of PF queues that
         * are configured.
         */
7527         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
7528                 num = i40e_pf_calc_configured_queues_num(pf);
7529         else
7530                 num = pf->dev_data->nb_rx_queues;
7531
7532         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
        PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
                        num);
7535
7536         if (num == 0) {
7537                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
7538                 return -ENOTSUP;
7539         }
7540
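        /*
         * Fill the RSS lookup table: queue indices are assigned round-robin
         * and packed four 8-bit entries per 32-bit PFQF_HLUT register,
         * which is written on every fourth iteration.
         */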
7541         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
7542                 if (j == num)
7543                         j = 0;
7544                 lut = (lut << 8) | (j & ((0x1 <<
7545                         hw->func_caps.rss_table_entry_width) - 1));
7546                 if ((i & 3) == 3)
7547                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
7548         }
7549
7550         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
7551         if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
7552                 i40e_pf_disable_rss(pf);
7553                 return 0;
7554         }
7555         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
7556                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
7557                 /* Random default keys */
7558                 static uint32_t rss_key_default[] = {0x6b793944,
7559                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
7560                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
7561                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
7562
7563                 rss_conf.rss_key = (uint8_t *)rss_key_default;
7564                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7565                                                         sizeof(uint32_t);
7566         }
7567
7568         return i40e_hw_rss_hash_set(pf, &rss_conf);
7569 }
7570
7571 static int
7572 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
7573                                struct rte_eth_tunnel_filter_conf *filter)
7574 {
7575         if (pf == NULL || filter == NULL) {
7576                 PMD_DRV_LOG(ERR, "Invalid parameter");
7577                 return -EINVAL;
7578         }
7579
7580         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
7581                 PMD_DRV_LOG(ERR, "Invalid queue ID");
7582                 return -EINVAL;
7583         }
7584
7585         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
7586                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
7587                 return -EINVAL;
7588         }
7589
7590         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
7591                 (is_zero_ether_addr(&filter->outer_mac))) {
7592                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
7593                 return -EINVAL;
7594         }
7595
7596         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
7597                 (is_zero_ether_addr(&filter->inner_mac))) {
7598                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
7599                 return -EINVAL;
7600         }
7601
7602         return 0;
7603 }
7604
7605 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
7606 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
7607 static int
7608 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
7609 {
7610         uint32_t val, reg;
7611         int ret = -EINVAL;
7612
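        /* The GRE key length is controlled by a single mask-enable bit in
         * GL_PRS_FVBM(2): set for a 3-byte key, cleared for the default
         * 4-byte key.
         */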
7613         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
7614         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
7615
7616         if (len == 3) {
7617                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
7618         } else if (len == 4) {
7619                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
7620         } else {
7621                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
7622                 return ret;
7623         }
7624
7625         if (reg != val) {
7626                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
7627                                                    reg, NULL);
7628                 if (ret != 0)
7629                         return ret;
7630         } else {
7631                 ret = 0;
7632         }
7633         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
7634                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
7635
7636         return ret;
7637 }
7638
7639 static int
7640 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
7641 {
7642         int ret = -EINVAL;
7643
7644         if (!hw || !cfg)
7645                 return -EINVAL;
7646
7647         switch (cfg->cfg_type) {
7648         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
7649                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
7650                 break;
7651         default:
7652                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
7653                 break;
7654         }
7655
7656         return ret;
7657 }
7658
7659 static int
7660 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
7661                                enum rte_filter_op filter_op,
7662                                void *arg)
7663 {
7664         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7665         int ret = I40E_ERR_PARAM;
7666
7667         switch (filter_op) {
7668         case RTE_ETH_FILTER_SET:
7669                 ret = i40e_dev_global_config_set(hw,
7670                         (struct rte_eth_global_cfg *)arg);
7671                 break;
7672         default:
7673                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
7674                 break;
7675         }
7676
7677         return ret;
7678 }
7679
7680 static int
7681 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
7682                           enum rte_filter_op filter_op,
7683                           void *arg)
7684 {
7685         struct rte_eth_tunnel_filter_conf *filter;
7686         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7687         int ret = I40E_SUCCESS;
7688
7689         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
7690
7691         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
7692                 return I40E_ERR_PARAM;
7693
7694         switch (filter_op) {
7695         case RTE_ETH_FILTER_NOP:
7696                 if (!(pf->flags & I40E_FLAG_VXLAN))
7697                         ret = I40E_NOT_SUPPORTED;
7698                 break;
7699         case RTE_ETH_FILTER_ADD:
7700                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
7701                 break;
7702         case RTE_ETH_FILTER_DELETE:
7703                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
7704                 break;
7705         default:
7706                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
7707                 ret = I40E_ERR_PARAM;
7708                 break;
7709         }
7710
7711         return ret;
7712 }
7713
7714 static int
7715 i40e_pf_config_mq_rx(struct i40e_pf *pf)
7716 {
7717         int ret = 0;
7718         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
7719
7720         /* RSS setup */
7721         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
7722                 ret = i40e_pf_config_rss(pf);
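        /* Clear all currently enabled hash types, then enable only those
         * requested in rss_hf.
         */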
7723         else
7724                 i40e_pf_disable_rss(pf);
7725
7726         return ret;
7727 }
7728
7729 /* Get the symmetric hash enable configurations per port */
7730 static void
7731 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
7732 {
7733         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
7734
7735         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
7736 }
7737
7738 /* Set the symmetric hash enable configurations per port */
7739 static void
7740 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
7741 {
7742         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
7743
7744         if (enable > 0) {
7745                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
7746                         PMD_DRV_LOG(INFO,
7747                                 "Symmetric hash has already been enabled");
7748                         return;
7749                 }
7750                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
7751         } else {
7752                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
7753                         PMD_DRV_LOG(INFO,
7754                                 "Symmetric hash has already been disabled");
7755                         return;
7756                 }
7757                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
7758         }
7759         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
7760         I40E_WRITE_FLUSH(hw);
7761 }
7762
7763 /*
7764  * Get global configurations of hash function type and symmetric hash enable
7765  * per flow type (pctype). Note that global configuration means it affects all
7766  * the ports on the same NIC.
7767  */
7768 static int
7769 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
7770                                    struct rte_eth_hash_global_conf *g_cfg)
7771 {
7772         uint32_t reg, mask = I40E_FLOW_TYPES;
7773         uint16_t i;
7774         enum i40e_filter_pctype pctype;
7775
7776         memset(g_cfg, 0, sizeof(*g_cfg));
7777         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
7778         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
7779                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
7780         else
7781                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
7782         PMD_DRV_LOG(DEBUG, "Hash function is %s",
7783                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
7784
7785         for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
7786                 if (!(mask & (1UL << i)))
7787                         continue;
7788                 mask &= ~(1UL << i);
                /* A set bit indicates the corresponding flow type is supported */
7790                 g_cfg->valid_bit_mask[0] |= (1UL << i);
7791                 /* if flowtype is invalid, continue */
7792                 if (!I40E_VALID_FLOW(i))
7793                         continue;
7794                 pctype = i40e_flowtype_to_pctype(i);
7795                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype));
7796                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
7797                         g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
7798         }
7799
7800         return 0;
7801 }
7802
7803 static int
7804 i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
7805 {
7806         uint32_t i;
7807         uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
7808
7809         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
7810                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
7811                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
7812                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
7813                                                 g_cfg->hash_func);
7814                 return -EINVAL;
7815         }
7816
        /*
         * As i40e supports fewer than 32 flow types, only the first 32 bits
         * need to be checked.
         */
7821         mask0 = g_cfg->valid_bit_mask[0];
7822         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
7823                 if (i == 0) {
                        /* Check if any unsupported flow type is configured */
7825                         if ((mask0 | i40e_mask) ^ i40e_mask)
7826                                 goto mask_err;
7827                 } else {
7828                         if (g_cfg->valid_bit_mask[i])
7829                                 goto mask_err;
7830                 }
7831         }
7832
7833         return 0;
7834
7835 mask_err:
7836         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
7837
7838         return -EINVAL;
7839 }
7840
/*
 * Set global configurations of hash function type and symmetric hash enable
 * per flow type (pctype). Note that any modification of the global
 * configuration will affect all the ports on the same NIC.
 */
7846 static int
7847 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
7848                                    struct rte_eth_hash_global_conf *g_cfg)
7849 {
7850         int ret;
7851         uint16_t i;
7852         uint32_t reg;
7853         uint32_t mask0 = g_cfg->valid_bit_mask[0];
7854         enum i40e_filter_pctype pctype;
7855
7856         /* Check the input parameters */
7857         ret = i40e_hash_global_config_check(g_cfg);
7858         if (ret < 0)
7859                 return ret;
7860
7861         for (i = 0; mask0 && i < UINT32_BIT; i++) {
7862                 if (!(mask0 & (1UL << i)))
7863                         continue;
7864                 mask0 &= ~(1UL << i);
7865                 /* if flowtype is invalid, continue */
7866                 if (!I40E_VALID_FLOW(i))
7867                         continue;
7868                 pctype = i40e_flowtype_to_pctype(i);
7869                 reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
7870                                 I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
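                /* On X722 the base TCP/UDP PCTYPEs fan out into extra
                 * variants, so the same symmetric-hash setting is mirrored
                 * to each of them.
                 */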
7871                 if (hw->mac.type == I40E_MAC_X722) {
7872                         if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
7873                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7874                                   I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg);
7875                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7876                                   I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP),
7877                                   reg);
7878                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7879                                   I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP),
7880                                   reg);
7881                         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
7882                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7883                                   I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg);
7884                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7885                                   I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK),
7886                                   reg);
7887                         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
7888                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7889                                   I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg);
7890                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7891                                   I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP),
7892                                   reg);
7893                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7894                                   I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP),
7895                                   reg);
7896                         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
7897                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7898                                   I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg);
7899                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7900                                   I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK),
7901                                   reg);
7902                         } else {
7903                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
7904                                   reg);
7905                         }
7906                 } else {
7907                         i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
7908                 }
7909         }
7910
7911         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
7912         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
7913                 /* Toeplitz */
7914                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
7915                         PMD_DRV_LOG(DEBUG,
7916                                 "Hash function already set to Toeplitz");
7917                         goto out;
7918                 }
7919                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
7920         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
7921                 /* Simple XOR */
7922                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
7923                         PMD_DRV_LOG(DEBUG,
7924                                 "Hash function already set to Simple XOR");
7925                         goto out;
7926                 }
7927                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
7928         } else
7929                 /* Use the default, and keep it as it is */
7930                 goto out;
7931
7932         i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
7933
7934 out:
7935         I40E_WRITE_FLUSH(hw);
7936
7937         return 0;
7938 }
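/*
 * Illustrative usage sketch, kept out of the build with #if 0: how an
 * application could request symmetric Toeplitz hashing for non-fragmented
 * IPv4/UDP flows through the generic filter API, which lands in
 * i40e_set_hash_filter_global_config() above. Port 0 is an assumed,
 * already-initialized i40e port; remember the setting is global per NIC.
 */
#if 0
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
        info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
        /* Word 0 of the bit masks covers flow types 0..31 */
        info.info.global_conf.valid_bit_mask[0] =
                1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        info.info.global_conf.sym_hash_enable_mask[0] =
                1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP;

        (void)rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_HASH,
                                      RTE_ETH_FILTER_SET, &info);
#endif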
7939
7940 /**
7941  * Valid input sets for hash and flow director filters per PCTYPE
7942  */
7943 static uint64_t
7944 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
7945                 enum rte_filter_type filter)
7946 {
7947         uint64_t valid;
7948
7949         static const uint64_t valid_hash_inset_table[] = {
7950                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
7951                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7952                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7953                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
7954                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
7955                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7956                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7957                         I40E_INSET_FLEX_PAYLOAD,
7958                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
7959                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7960                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7961                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7962                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7963                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7964                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7965                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7966                         I40E_INSET_FLEX_PAYLOAD,
7967                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
7968                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7969                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7970                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7971                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7972                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7973                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7974                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7975                         I40E_INSET_FLEX_PAYLOAD,
7976                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
7977                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7978                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7979                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7980                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7981                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7982                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7983                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7984                         I40E_INSET_FLEX_PAYLOAD,
7985                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
7986                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7987                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7988                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7989                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7990                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7991                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7992                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7993                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
7994                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
7995                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7996                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7997                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7998                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7999                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8000                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8001                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8002                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8003                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8004                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8005                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8006                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8007                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8008                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8009                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8010                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8011                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
8012                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8013                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8014                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8015                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8016                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8017                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8018                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8019                         I40E_INSET_FLEX_PAYLOAD,
8020                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8021                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8022                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8023                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8024                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8025                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
8026                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
8027                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
8028                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8029                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8030                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8031                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8032                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8033                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8034                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8035                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
8036                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8037                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8038                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8039                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8040                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8041                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8042                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8043                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8044                         I40E_INSET_FLEX_PAYLOAD,
8045                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8046                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8047                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8048                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8049                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8050                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8051                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8052                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8053                         I40E_INSET_FLEX_PAYLOAD,
8054                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8055                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8056                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8057                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8058                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8059                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8060                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8061                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8062                         I40E_INSET_FLEX_PAYLOAD,
8063                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8064                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8065                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8066                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8067                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8068                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8069                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8070                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8071                         I40E_INSET_FLEX_PAYLOAD,
8072                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8073                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8074                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8075                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8076                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8077                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8078                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8079                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
8080                         I40E_INSET_FLEX_PAYLOAD,
8081                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8082                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8083                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8084                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8085                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8086                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8087                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
8088                         I40E_INSET_FLEX_PAYLOAD,
8089                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8090                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8091                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8092                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
8093                         I40E_INSET_FLEX_PAYLOAD,
8094         };
8095
8096         /**
8097          * Flow director supports only fields defined in
8098          * union rte_eth_fdir_flow.
8099          */
8100         static const uint64_t valid_fdir_inset_table[] = {
8101                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8102                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8103                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8104                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8105                 I40E_INSET_IPV4_TTL,
8106                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8107                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8108                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8109                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8110                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8111                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8112                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8113                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8114                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8115                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8116                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8117                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8118                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8119                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8120                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8121                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8122                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8123                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8124                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8125                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8126                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8127                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8128                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8129                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8130                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8131                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8132                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8133                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8134                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8135                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8136                 I40E_INSET_SCTP_VT,
8137                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8138                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8139                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8140                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8141                 I40E_INSET_IPV4_TTL,
8142                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8143                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8144                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8145                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8146                 I40E_INSET_IPV6_HOP_LIMIT,
8147                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8148                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8149                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8150                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8151                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8152                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8153                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8154                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8155                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8156                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8157                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8158                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8159                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8160                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8161                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8162                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8163                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8164                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8165                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8166                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8167                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8168                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8169                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8170                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8171                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8172                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8173                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8174                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8175                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8176                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8177                 I40E_INSET_SCTP_VT,
8178                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8179                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8180                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8181                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8182                 I40E_INSET_IPV6_HOP_LIMIT,
8183                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8184                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8185                 I40E_INSET_LAST_ETHER_TYPE,
8186         };
8187
8188         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8189                 return 0;
8190         if (filter == RTE_ETH_FILTER_HASH)
8191                 valid = valid_hash_inset_table[pctype];
8192         else
8193                 valid = valid_fdir_inset_table[pctype];
8194
8195         return valid;
8196 }
8197
8198 /**
8199  * Validate if the input set is allowed for a specific PCTYPE
8200  */
8201 int
8202 i40e_validate_input_set(enum i40e_filter_pctype pctype,
8203                 enum rte_filter_type filter, uint64_t inset)
8204 {
8205         uint64_t valid;
8206
8207         valid = i40e_get_valid_input_set(pctype, filter);
8208         if (inset & (~valid))
8209                 return -EINVAL;
8210
8211         return 0;
8212 }
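/*
 * Illustrative sketch, kept out of the build with #if 0: validating a
 * candidate input set against the tables above. The 4-tuple below is part
 * of the valid flow director inset for NONF_IPV4_TCP, so ret is 0; OR-ing
 * in a bit outside that table entry would make it -EINVAL.
 */
#if 0
        uint64_t inset = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT;
        int ret = i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
                                          RTE_ETH_FILTER_FDIR, inset);
#endif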
8213
8214 /* default input set fields combination per pctype */
8215 uint64_t
8216 i40e_get_default_input_set(uint16_t pctype)
8217 {
8218         static const uint64_t default_inset_table[] = {
8219                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8220                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8221                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8222                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8223                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8224                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8225                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8226                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8227                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8228                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8229                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8230                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8231                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8232                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8233                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8234                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8235                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8236                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8237                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8238                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8239                         I40E_INSET_SCTP_VT,
8240                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8241                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8242                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8243                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8244                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8245                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8246                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8247                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8248                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8249                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8250                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8251                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8252                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8253                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8254                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8255                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8256                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8257                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8258                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8259                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8260                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8261                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8262                         I40E_INSET_SCTP_VT,
8263                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8264                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8265                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8266                         I40E_INSET_LAST_ETHER_TYPE,
8267         };
8268
8269         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8270                 return 0;
8271
8272         return default_inset_table[pctype];
8273 }
8274
8275 /**
8276  * Parse the input set from index to logical bit masks
8277  */
8278 static int
8279 i40e_parse_input_set(uint64_t *inset,
8280                      enum i40e_filter_pctype pctype,
8281                      enum rte_eth_input_set_field *field,
8282                      uint16_t size)
8283 {
8284         uint16_t i, j;
8285         int ret = -EINVAL;
8286
8287         static const struct {
8288                 enum rte_eth_input_set_field field;
8289                 uint64_t inset;
8290         } inset_convert_table[] = {
8291                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
8292                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
8293                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
8294                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
8295                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
8296                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
8297                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
8298                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
8299                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
8300                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
8301                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
8302                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
8303                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
8304                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
8305                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
8306                         I40E_INSET_IPV6_NEXT_HDR},
8307                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
8308                         I40E_INSET_IPV6_HOP_LIMIT},
8309                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
8310                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
8311                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
8312                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
8313                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
8314                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
8315                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
8316                         I40E_INSET_SCTP_VT},
8317                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
8318                         I40E_INSET_TUNNEL_DMAC},
8319                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
8320                         I40E_INSET_VLAN_TUNNEL},
8321                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
8322                         I40E_INSET_TUNNEL_ID},
8323                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
8324                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
8325                         I40E_INSET_FLEX_PAYLOAD_W1},
8326                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
8327                         I40E_INSET_FLEX_PAYLOAD_W2},
8328                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
8329                         I40E_INSET_FLEX_PAYLOAD_W3},
8330                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
8331                         I40E_INSET_FLEX_PAYLOAD_W4},
8332                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
8333                         I40E_INSET_FLEX_PAYLOAD_W5},
8334                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
8335                         I40E_INSET_FLEX_PAYLOAD_W6},
8336                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
8337                         I40E_INSET_FLEX_PAYLOAD_W7},
8338                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
8339                         I40E_INSET_FLEX_PAYLOAD_W8},
8340         };
8341
8342         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
8343                 return ret;
8344
8345         /* Only one item is allowed for 'default' or 'none' */
8346         if (size == 1) {
8347                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
8348                         *inset = i40e_get_default_input_set(pctype);
8349                         return 0;
8350                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
8351                         *inset = I40E_INSET_NONE;
8352                         return 0;
8353                 }
8354         }
8355
8356         for (i = 0, *inset = 0; i < size; i++) {
8357                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
8358                         if (field[i] == inset_convert_table[j].field) {
8359                                 *inset |= inset_convert_table[j].inset;
8360                                 break;
8361                         }
8362                 }
8363
8364                 /* Unsupported input set field; return immediately */
8365                 if (j == RTE_DIM(inset_convert_table))
8366                         return ret;
8367         }
8368
8369         return 0;
8370 }
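/*
 * Illustrative sketch, kept out of the build with #if 0: parsing the
 * generic field identifiers into i40e inset bits. For an IPv4/UDP flow
 * keyed on the 4-tuple, *inset comes back as I40E_INSET_IPV4_SRC |
 * I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT.
 */
#if 0
        enum rte_eth_input_set_field fields[] = {
                RTE_ETH_INPUT_SET_L3_SRC_IP4,
                RTE_ETH_INPUT_SET_L3_DST_IP4,
                RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT,
                RTE_ETH_INPUT_SET_L4_UDP_DST_PORT,
        };
        uint64_t inset = 0;
        int ret = i40e_parse_input_set(&inset,
                                       I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
                                       fields, RTE_DIM(fields));
#endif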
8371
8372 /**
8373  * Translate the input set from logical bit masks to register-aware
8374  * bit masks
8375  */
8376 uint64_t
8377 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
8378 {
8379         uint64_t val = 0;
8380         uint16_t i;
8381
8382         struct inset_map {
8383                 uint64_t inset;
8384                 uint64_t inset_reg;
8385         };
8386
8387         static const struct inset_map inset_map_common[] = {
8388                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
8389                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
8390                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
8391                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
8392                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
8393                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
8394                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
8395                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
8396                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
8397                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
8398                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
8399                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
8400                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
8401                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
8402                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
8403                 {I40E_INSET_TUNNEL_DMAC,
8404                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
8405                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
8406                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
8407                 {I40E_INSET_TUNNEL_SRC_PORT,
8408                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
8409                 {I40E_INSET_TUNNEL_DST_PORT,
8410                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
8411                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
8412                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
8413                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
8414                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
8415                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
8416                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
8417                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
8418                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
8419                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
8420         };
8421
8422         /* Some registers are mapped differently on the X722 */
8423         static const struct inset_map inset_map_diff_x722[] = {
8424                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
8425                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
8426                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
8427                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
8428         };
8429
8430         static const struct inset_map inset_map_diff_not_x722[] = {
8431                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
8432                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
8433                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
8434                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
8435         };
8436
8437         if (input == 0)
8438                 return val;
8439
8440         /* Translate input set to register aware inset */
8441         if (type == I40E_MAC_X722) {
8442                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
8443                         if (input & inset_map_diff_x722[i].inset)
8444                                 val |= inset_map_diff_x722[i].inset_reg;
8445                 }
8446         } else {
8447                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
8448                         if (input & inset_map_diff_not_x722[i].inset)
8449                                 val |= inset_map_diff_not_x722[i].inset_reg;
8450                 }
8451         }
8452
8453         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
8454                 if (input & inset_map_common[i].inset)
8455                         val |= inset_map_common[i].inset_reg;
8456         }
8457
8458         return val;
8459 }
8460
8461 int
8462 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
8463 {
8464         uint8_t i, idx = 0;
8465         uint64_t inset_need_mask = inset;
8466
8467         static const struct {
8468                 uint64_t inset;
8469                 uint32_t mask;
8470         } inset_mask_map[] = {
8471                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
8472                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
8473                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
8474                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
8475                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
8476                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
8477                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
8478                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
8479         };
8480
8481         if (!inset || !mask || !nb_elem)
8482                 return 0;
8483
8484         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
8485                 /* Clear the inset bit, if no MASK is required,
8486                  * for example proto + ttl
8487                  */
8488                 if ((inset & inset_mask_map[i].inset) ==
8489                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
8490                         inset_need_mask &= ~inset_mask_map[i].inset;
8491                 if (!inset_need_mask)
8492                         return 0;
8493         }
8494         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
8495                 if ((inset_need_mask & inset_mask_map[i].inset) ==
8496                     inset_mask_map[i].inset) {
8497                         if (idx >= nb_elem) {
8498                                 PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
8499                                 return -EINVAL;
8500                         }
8501                         mask[idx] = inset_mask_map[i].mask;
8502                         idx++;
8503                 }
8504         }
8505
8506         return idx;
8507 }
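/*
 * Illustrative sketch, kept out of the build with #if 0: IPv4 TOS needs an
 * explicit field mask register, while proto + ttl together need none, so
 * the first call returns 1 (one mask entry filled) and the second returns 0.
 */
#if 0
        uint32_t mask[I40E_INSET_MASK_NUM_REG] = {0};
        int n_tos = i40e_generate_inset_mask_reg(I40E_INSET_IPV4_TOS, mask,
                                                 I40E_INSET_MASK_NUM_REG);
        int n_proto_ttl = i40e_generate_inset_mask_reg(I40E_INSET_IPV4_PROTO |
                                                       I40E_INSET_IPV4_TTL,
                                                       mask,
                                                       I40E_INSET_MASK_NUM_REG);
#endif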
8508
8509 void
8510 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
8511 {
8512         uint32_t reg = i40e_read_rx_ctl(hw, addr);
8513
8514         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
8515         if (reg != val)
8516                 i40e_write_rx_ctl(hw, addr, val);
8517         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
8518                     (uint32_t)i40e_read_rx_ctl(hw, addr));
8519 }
8520
8521 static void
8522 i40e_filter_input_set_init(struct i40e_pf *pf)
8523 {
8524         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8525         enum i40e_filter_pctype pctype;
8526         uint64_t input_set, inset_reg;
8527         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8528         int num, i;
8529
8530         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
8531              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
8532                 if (hw->mac.type == I40E_MAC_X722) {
8533                         if (!I40E_VALID_PCTYPE_X722(pctype))
8534                                 continue;
8535                 } else {
8536                         if (!I40E_VALID_PCTYPE(pctype))
8537                                 continue;
8538                 }
8539
8540                 input_set = i40e_get_default_input_set(pctype);
8541
8542                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8543                                                    I40E_INSET_MASK_NUM_REG);
8544                 if (num < 0)
8545                         return;
8546                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
8547                                         input_set);
8548
8549                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
8550                                       (uint32_t)(inset_reg & UINT32_MAX));
8551                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
8552                                      (uint32_t)((inset_reg >>
8553                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
8554                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
8555                                       (uint32_t)(inset_reg & UINT32_MAX));
8556                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
8557                                      (uint32_t)((inset_reg >>
8558                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
8559
8560                 for (i = 0; i < num; i++) {
8561                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8562                                              mask_reg[i]);
8563                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8564                                              mask_reg[i]);
8565                 }
8566                 /* Clear unused mask registers of the pctype */
8567                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
8568                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8569                                              0);
8570                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8571                                              0);
8572                 }
8573                 I40E_WRITE_FLUSH(hw);
8574
8575                 /* store the default input set */
8576                 pf->hash_input_set[pctype] = input_set;
8577                 pf->fdir.input_set[pctype] = input_set;
8578         }
8579 }
8580
8581 int
8582 i40e_hash_filter_inset_select(struct i40e_hw *hw,
8583                          struct rte_eth_input_set_conf *conf)
8584 {
8585         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8586         enum i40e_filter_pctype pctype;
8587         uint64_t input_set, inset_reg = 0;
8588         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8589         int ret, i, num;
8590
8591         if (!conf) {
8592                 PMD_DRV_LOG(ERR, "Invalid pointer");
8593                 return -EFAULT;
8594         }
8595         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
8596             conf->op != RTE_ETH_INPUT_SET_ADD) {
8597                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
8598                 return -EINVAL;
8599         }
8600
8601         if (!I40E_VALID_FLOW(conf->flow_type)) {
8602                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
8603                 return -EINVAL;
8604         }
8605
8606         if (hw->mac.type == I40E_MAC_X722) {
8607                 /* get translated pctype value in fd pctype register */
8608                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
8609                         I40E_GLQF_FD_PCTYPES((int)i40e_flowtype_to_pctype(
8610                         conf->flow_type)));
8611         } else
8612                 pctype = i40e_flowtype_to_pctype(conf->flow_type);
8613
8614         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
8615                                    conf->inset_size);
8616         if (ret) {
8617                 PMD_DRV_LOG(ERR, "Failed to parse input set");
8618                 return -EINVAL;
8619         }
8620         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_HASH,
8621                                     input_set) != 0) {
8622                 PMD_DRV_LOG(ERR, "Invalid input set");
8623                 return -EINVAL;
8624         }
8625         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
8626                 /* get inset value in register */
8627                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
8628                 inset_reg <<= I40E_32_BIT_WIDTH;
8629                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
8630                 input_set |= pf->hash_input_set[pctype];
8631         }
8632         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8633                                            I40E_INSET_MASK_NUM_REG);
8634         if (num < 0)
8635                 return -EINVAL;
8636
8637         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
8638
8639         i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
8640                               (uint32_t)(inset_reg & UINT32_MAX));
8641         i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
8642                              (uint32_t)((inset_reg >>
8643                              I40E_32_BIT_WIDTH) & UINT32_MAX));
8644
8645         for (i = 0; i < num; i++)
8646                 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8647                                      mask_reg[i]);
8648         /* Clear unused mask registers of the pctype */
8649         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
8650                 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8651                                      0);
8652         I40E_WRITE_FLUSH(hw);
8653
8654         pf->hash_input_set[pctype] = input_set;
8655         return 0;
8656 }
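/*
 * Illustrative sketch, kept out of the build with #if 0: selecting a
 * source-IP-only hash input set for IPv4/UDP from the application side;
 * the request is routed to i40e_hash_filter_inset_select() above. Port 0
 * is an assumed i40e port.
 */
#if 0
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
        info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
        info.info.input_set_conf.inset_size = 1;
        info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;

        (void)rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_HASH,
                                      RTE_ETH_FILTER_SET, &info);
#endif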
8657
8658 int
8659 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
8660                          struct rte_eth_input_set_conf *conf)
8661 {
8662         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8663         enum i40e_filter_pctype pctype;
8664         uint64_t input_set, inset_reg = 0;
8665         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8666         int ret, i, num;
8667
8668         if (!hw || !conf) {
8669                 PMD_DRV_LOG(ERR, "Invalid pointer");
8670                 return -EFAULT;
8671         }
8672         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
8673             conf->op != RTE_ETH_INPUT_SET_ADD) {
8674                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
8675                 return -EINVAL;
8676         }
8677
8678         if (!I40E_VALID_FLOW(conf->flow_type)) {
8679                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
8680                 return -EINVAL;
8681         }
8682
8683         pctype = i40e_flowtype_to_pctype(conf->flow_type);
8684
8685         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
8686                                    conf->inset_size);
8687         if (ret) {
8688                 PMD_DRV_LOG(ERR, "Failed to parse input set");
8689                 return -EINVAL;
8690         }
8691         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
8692                                     input_set) != 0) {
8693                 PMD_DRV_LOG(ERR, "Invalid input set");
8694                 return -EINVAL;
8695         }
8696
8697         /* get inset value in register */
8698         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
8699         inset_reg <<= I40E_32_BIT_WIDTH;
8700         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
8701
8702         /* The flex payload inset cannot be changed for fdir here;
8703          * it is configured by writing I40E_PRTQF_FD_FLXINSET
8704          * in i40e_set_flex_mask_on_pctype().
8705          */
8706         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
8707                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
8708         else
8709                 input_set |= pf->fdir.input_set[pctype];
8710         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8711                                            I40E_INSET_MASK_NUM_REG);
8712         if (num < 0)
8713                 return -EINVAL;
8714
8715         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
8716
8717         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
8718                               (uint32_t)(inset_reg & UINT32_MAX));
8719         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
8720                              (uint32_t)((inset_reg >>
8721                              I40E_32_BIT_WIDTH) & UINT32_MAX));
8722
8723         for (i = 0; i < num; i++)
8724                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8725                                      mask_reg[i]);
8726         /* Clear unused mask registers of the pctype */
8727         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
8728                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8729                                      0);
8730         I40E_WRITE_FLUSH(hw);
8731
8732         pf->fdir.input_set[pctype] = input_set;
8733         return 0;
8734 }
8735
8736 static int
8737 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
8738 {
8739         int ret = 0;
8740
8741         if (!hw || !info) {
8742                 PMD_DRV_LOG(ERR, "Invalid pointer");
8743                 return -EFAULT;
8744         }
8745
8746         switch (info->info_type) {
8747         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
8748                 i40e_get_symmetric_hash_enable_per_port(hw,
8749                                         &(info->info.enable));
8750                 break;
8751         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
8752                 ret = i40e_get_hash_filter_global_config(hw,
8753                                 &(info->info.global_conf));
8754                 break;
8755         default:
8756                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
8757                                                         info->info_type);
8758                 ret = -EINVAL;
8759                 break;
8760         }
8761
8762         return ret;
8763 }
8764
8765 static int
8766 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
8767 {
8768         int ret = 0;
8769
8770         if (!hw || !info) {
8771                 PMD_DRV_LOG(ERR, "Invalid pointer");
8772                 return -EFAULT;
8773         }
8774
8775         switch (info->info_type) {
8776         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
8777                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
8778                 break;
8779         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
8780                 ret = i40e_set_hash_filter_global_config(hw,
8781                                 &(info->info.global_conf));
8782                 break;
8783         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
8784                 ret = i40e_hash_filter_inset_select(hw,
8785                                                &(info->info.input_set_conf));
8786                 break;
8787
8788         default:
8789                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
8790                                                         info->info_type);
8791                 ret = -EINVAL;
8792                 break;
8793         }
8794
8795         return ret;
8796 }
8797
8798 /* Operations for hash function */
8799 static int
8800 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
8801                       enum rte_filter_op filter_op,
8802                       void *arg)
8803 {
8804         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8805         int ret = 0;
8806
8807         switch (filter_op) {
8808         case RTE_ETH_FILTER_NOP:
8809                 break;
8810         case RTE_ETH_FILTER_GET:
8811                 ret = i40e_hash_filter_get(hw,
8812                         (struct rte_eth_hash_filter_info *)arg);
8813                 break;
8814         case RTE_ETH_FILTER_SET:
8815                 ret = i40e_hash_filter_set(hw,
8816                         (struct rte_eth_hash_filter_info *)arg);
8817                 break;
8818         default:
8819                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
8820                                                                 filter_op);
8821                 ret = -ENOTSUP;
8822                 break;
8823         }
8824
8825         return ret;
8826 }
8827
8828 /* Convert ethertype filter structure */
8829 static int
8830 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
8831                               struct i40e_ethertype_filter *filter)
8832 {
8833         rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
8834         filter->input.ether_type = input->ether_type;
8835         filter->flags = input->flags;
8836         filter->queue = input->queue;
8837
8838         return 0;
8839 }
8840
8841 /* Check if the ethertype filter exists in the SW list */
8842 struct i40e_ethertype_filter *
8843 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
8844                                 const struct i40e_ethertype_filter_input *input)
8845 {
8846         int ret;
8847
8848         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
8849         if (ret < 0)
8850                 return NULL;
8851
8852         return ethertype_rule->hash_map[ret];
8853 }
8854
8855 /* Add ethertype filter in SW list */
8856 static int
8857 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
8858                                 struct i40e_ethertype_filter *filter)
8859 {
8860         struct i40e_ethertype_rule *rule = &pf->ethertype;
8861         int ret;
8862
8863         ret = rte_hash_add_key(rule->hash_table, &filter->input);
8864         if (ret < 0) {
8865                 PMD_DRV_LOG(ERR,
8866                             "Failed to insert ethertype filter"
8867                             " to hash table %d!",
8868                             ret);
8869                 return ret;
8870         }
8871         rule->hash_map[ret] = filter;
8872
8873         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
8874
8875         return 0;
8876 }
8877
8878 /* Delete ethertype filter in SW list */
8879 int
8880 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
8881                              struct i40e_ethertype_filter_input *input)
8882 {
8883         struct i40e_ethertype_rule *rule = &pf->ethertype;
8884         struct i40e_ethertype_filter *filter;
8885         int ret;
8886
8887         ret = rte_hash_del_key(rule->hash_table, input);
8888         if (ret < 0) {
8889                 PMD_DRV_LOG(ERR,
8890                             "Failed to delete ethertype filter"
8891                             " to hash table %d!",
8892                             ret);
8893                 return ret;
8894         }
8895         filter = rule->hash_map[ret];
8896         rule->hash_map[ret] = NULL;
8897
8898         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
8899         rte_free(filter);
8900
8901         return 0;
8902 }
8903
8904 /*
8905  * Configure an ethertype filter, which can direct packets by filtering
8906  * on MAC address and ether_type, or on ether_type alone
8907  */
8908 int
8909 i40e_ethertype_filter_set(struct i40e_pf *pf,
8910                         struct rte_eth_ethertype_filter *filter,
8911                         bool add)
8912 {
8913         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8914         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
8915         struct i40e_ethertype_filter *ethertype_filter, *node;
8916         struct i40e_ethertype_filter check_filter;
8917         struct i40e_control_filter_stats stats;
8918         uint16_t flags = 0;
8919         int ret;
8920
8921         if (filter->queue >= pf->dev_data->nb_rx_queues) {
8922                 PMD_DRV_LOG(ERR, "Invalid queue ID");
8923                 return -EINVAL;
8924         }
8925         if (filter->ether_type == ETHER_TYPE_IPv4 ||
8926                 filter->ether_type == ETHER_TYPE_IPv6) {
8927                 PMD_DRV_LOG(ERR,
8928                         "unsupported ether_type(0x%04x) in control packet filter.",
8929                         filter->ether_type);
8930                 return -EINVAL;
8931         }
8932         if (filter->ether_type == ETHER_TYPE_VLAN)
8933                 PMD_DRV_LOG(WARNING,
8934                         "filter vlan ether_type in first tag is not supported.");
8935
8936         /* Check if the filter already exists in the SW list */
8937         memset(&check_filter, 0, sizeof(check_filter));
8938         i40e_ethertype_filter_convert(filter, &check_filter);
8939         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
8940                                                &check_filter.input);
8941         if (add && node) {
8942                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
8943                 return -EINVAL;
8944         }
8945
8946         if (!add && !node) {
8947                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
8948                 return -EINVAL;
8949         }
8950
8951         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
8952                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
8953         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
8954                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
8955         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
8956
8957         memset(&stats, 0, sizeof(stats));
8958         ret = i40e_aq_add_rem_control_packet_filter(hw,
8959                         filter->mac_addr.addr_bytes,
8960                         filter->ether_type, flags,
8961                         pf->main_vsi->seid,
8962                         filter->queue, add, &stats, NULL);
8963
8964         PMD_DRV_LOG(INFO,
8965                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
8966                 ret, stats.mac_etype_used, stats.etype_used,
8967                 stats.mac_etype_free, stats.etype_free);
8968         if (ret < 0)
8969                 return -ENOSYS;
8970
8971         /* Add or delete a filter in SW list */
8972         if (add) {
8973                 ethertype_filter = rte_zmalloc("ethertype_filter",
8974                                        sizeof(*ethertype_filter), 0);
                     if (ethertype_filter == NULL) {
                             PMD_DRV_LOG(ERR, "Failed to allocate memory");
                             return -ENOMEM;
                     }
8975                 rte_memcpy(ethertype_filter, &check_filter,
8976                            sizeof(check_filter));
8977                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
8978         } else {
8979                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
8980         }
8981
8982         return ret;
8983 }
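/*
 * Illustrative sketch, kept out of the build with #if 0: steering all LLDP
 * frames (ether type 0x88CC) on port 0 to RX queue 1 via the generic
 * filter API; the request ends up in i40e_ethertype_filter_set() above.
 * Leaving RTE_ETHTYPE_FLAGS_MAC unset matches on ether_type alone.
 */
#if 0
        struct rte_eth_ethertype_filter filter;

        memset(&filter, 0, sizeof(filter));
        filter.ether_type = 0x88CC;     /* LLDP */
        filter.flags = 0;               /* ignore MAC, match ether_type only */
        filter.queue = 1;

        (void)rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_ETHERTYPE,
                                      RTE_ETH_FILTER_ADD, &filter);
#endif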
8984
8985 /*
8986  * Handle operations for ethertype filter.
8987  */
8988 static int
8989 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
8990                                 enum rte_filter_op filter_op,
8991                                 void *arg)
8992 {
8993         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8994         int ret = 0;
8995
8996         if (filter_op == RTE_ETH_FILTER_NOP)
8997                 return ret;
8998
8999         if (arg == NULL) {
9000                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
9001                             filter_op);
9002                 return -EINVAL;
9003         }
9004
9005         switch (filter_op) {
9006         case RTE_ETH_FILTER_ADD:
9007                 ret = i40e_ethertype_filter_set(pf,
9008                         (struct rte_eth_ethertype_filter *)arg,
9009                         TRUE);
9010                 break;
9011         case RTE_ETH_FILTER_DELETE:
9012                 ret = i40e_ethertype_filter_set(pf,
9013                         (struct rte_eth_ethertype_filter *)arg,
9014                         FALSE);
9015                 break;
9016         default:
9017                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
9018                 ret = -ENOSYS;
9019                 break;
9020         }
9021         return ret;
9022 }
9023
9024 static int
9025 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
9026                      enum rte_filter_type filter_type,
9027                      enum rte_filter_op filter_op,
9028                      void *arg)
9029 {
9030         int ret = 0;
9031
9032         if (dev == NULL)
9033                 return -EINVAL;
9034
9035         switch (filter_type) {
9036         case RTE_ETH_FILTER_NONE:
9037                 /* For global configuration */
9038                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
9039                 break;
9040         case RTE_ETH_FILTER_HASH:
9041                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
9042                 break;
9043         case RTE_ETH_FILTER_MACVLAN:
9044                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
9045                 break;
9046         case RTE_ETH_FILTER_ETHERTYPE:
9047                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
9048                 break;
9049         case RTE_ETH_FILTER_TUNNEL:
9050                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
9051                 break;
9052         case RTE_ETH_FILTER_FDIR:
9053                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
9054                 break;
9055         case RTE_ETH_FILTER_GENERIC:
9056                 if (filter_op != RTE_ETH_FILTER_GET)
9057                         return -EINVAL;
9058                 *(const void **)arg = &i40e_flow_ops;
9059                 break;
9060         default:
9061                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9062                                                         filter_type);
9063                 ret = -EINVAL;
9064                 break;
9065         }
9066
9067         return ret;
9068 }
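/*
 * Usage sketch (assumes the DPDK 17.x ethdev API; port_id is a valid,
 * configured i40e port): the RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET
 * pair handled above is the path rte_flow uses to fetch the driver's
 * flow ops.
 */
#if 0	/* example only, not compiled with the driver */
	const struct rte_flow_ops *ops = NULL;

	/* lands in i40e_dev_filter_ctrl() above for an i40e port */
	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				    RTE_ETH_FILTER_GET, &ops) == 0 && ops != NULL)
		/* ops->validate / ops->create / ops->destroy ... */
		;
#endif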
9069
9070 /*
9071  * Check and enable Extended Tag.
9072  * Enabling Extended Tag is important for 40G performance.
9073  */
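/*
 * Background note: the PCIe Extended Tag capability lets a requester use
 * the full 8-bit Tag field, raising the number of outstanding non-posted
 * requests from 32 to 256, which helps keep a 40G link saturated with
 * DMA reads.
 */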
9074 static void
9075 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9076 {
9077         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9078         uint32_t buf = 0;
9079         int ret;
9080
9081         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9082                                       PCI_DEV_CAP_REG);
9083         if (ret < 0) {
9084                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9085                             PCI_DEV_CAP_REG);
9086                 return;
9087         }
9088         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9089                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9090                 return;
9091         }
9092
9093         buf = 0;
9094         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9095                                       PCI_DEV_CTRL_REG);
9096         if (ret < 0) {
9097                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9098                             PCI_DEV_CTRL_REG);
9099                 return;
9100         }
9101         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9102                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9103                 return;
9104         }
9105         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9106         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9107                                        PCI_DEV_CTRL_REG);
9108         if (ret < 0) {
9109                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9110                             PCI_DEV_CTRL_REG);
9111                 return;
9112         }
9113 }
9114
9115 /*
9116  * As some registers won't be reset unless a global hardware reset is
9117  * performed, hardware initialization is needed to put those registers
9118  * into an expected initial state.
9119  */
9120 static void
9121 i40e_hw_init(struct rte_eth_dev *dev)
9122 {
9123         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9124
9125         i40e_enable_extended_tag(dev);
9126
9127         /* clear the PF Queue Filter control register */
9128         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9129
9130         /* Disable symmetric hash per port */
9131         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9132 }
9133
9134 enum i40e_filter_pctype
9135 i40e_flowtype_to_pctype(uint16_t flow_type)
9136 {
9137         static const enum i40e_filter_pctype pctype_table[] = {
9138                 [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
9139                 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
9140                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
9141                 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
9142                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9143                 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
9144                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
9145                 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
9146                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
9147                 [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
9148                 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
9149                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
9150                 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
9151                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
9152                 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
9153                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
9154                 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
9155                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
9156                 [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
9157         };
9158
9159         return pctype_table[flow_type];
9160 }
9161
9162 uint16_t
9163 i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
9164 {
9165         static const uint16_t flowtype_table[] = {
9166                 [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
9167                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9168                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
9169                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9170                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
9171                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9172                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
9173                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9174                         RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
9175                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9176                         RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
9177                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9178                         RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
9179                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9180                         RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
9181                 [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
9182                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9183                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
9184                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9185                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
9186                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9187                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
9188                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9189                         RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
9190                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9191                         RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
9192                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9193                         RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
9194                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9195                         RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
9196                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
9197         };
9198
9199         return flowtype_table[pctype];
9200 }
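/*
 * Illustrative note on the two tables above: the mapping is many-to-one in
 * the pctype -> flowtype direction (e.g. the unicast and multicast IPV4_UDP
 * pctypes both map to RTE_ETH_FLOW_NONFRAG_IPV4_UDP), so only the round
 * trip that starts from a flow type is the identity.  A minimal sketch:
 */
#if 0	/* example only, not compiled with the driver */
	/* holds for every flow type listed in pctype_table[] */
	assert(i40e_pctype_to_flowtype(
		       i40e_flowtype_to_pctype(RTE_ETH_FLOW_NONFRAG_IPV4_UDP)) ==
	       RTE_ETH_FLOW_NONFRAG_IPV4_UDP);
#endif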
9201
9202 /*
9203  * On X710, throughput falls far short of expectations on recent firmware
9204  * versions. The same holds on XL710 when promiscuous mode is disabled, or
9205  * when promiscuous mode is enabled and the port MAC address equals the
9206  * packet's destination MAC address. The fix may not be integrated in the
9207  * next firmware version, so a workaround in the software driver is
9208  * needed. It modifies the initial values of 3 internal-only registers for
9209  * both X710 and XL710. Note that the values for X710 and XL710 could be
9210  * different, and the workaround can be removed when the issue is fixed in
9211  * firmware in the future.
9212  */
9213
9214 /* For both X710 and XL710 */
9215 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
9216 #define I40E_GL_SWR_PRI_JOIN_MAP_0       0x26CE00
9217
9218 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9219 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
9220
9221 /* For X722 */
9222 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
9223 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
9224
9225 /* For X710 */
9226 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
9227 /* For XL710 */
9228 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
9229 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
9230
9231 static int
9232 i40e_dev_sync_phy_type(struct i40e_hw *hw)
9233 {
9234         enum i40e_status_code status;
9235         struct i40e_aq_get_phy_abilities_resp phy_ab;
9236         int ret = -ENOTSUP;
9237
9238         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
9239                                               NULL);
9240
9241         if (status) {
9242                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
9243                         status);
9244                 return ret;
9245         }
9246
9247         return 0;
9248 }
9249
9250 static void
9251 i40e_configure_registers(struct i40e_hw *hw)
9252 {
9253         static struct {
9254                 uint32_t addr;
9255                 uint64_t val;
9256         } reg_table[] = {
9257                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
9258                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
9259                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
9260         };
9261         uint64_t reg;
9262         uint32_t i;
9263         int ret;
9264
9265         for (i = 0; i < RTE_DIM(reg_table); i++) {
9266                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
9267                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9268                                 reg_table[i].val =
9269                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
9270                         else /* For X710/XL710/XXV710 */
9271                                 reg_table[i].val =
9272                                         I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE;
9273                 }
9274
9275                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
9276                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9277                                 reg_table[i].val =
9278                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9279                         else /* For X710/XL710/XXV710 */
9280                                 reg_table[i].val =
9281                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9282                 }
9283
9284                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
9285                         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
9286                             I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
9287                                 reg_table[i].val =
9288                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
9289                         else /* For X710 */
9290                                 reg_table[i].val =
9291                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
9292                 }
9293
9294                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
9295                                                         &reg, NULL);
9296                 if (ret < 0) {
9297                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
9298                                                         reg_table[i].addr);
9299                         break;
9300                 }
9301                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
9302                                                 reg_table[i].addr, reg);
9303                 if (reg == reg_table[i].val)
9304                         continue;
9305
9306                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
9307                                                 reg_table[i].val, NULL);
9308                 if (ret < 0) {
9309                         PMD_DRV_LOG(ERR,
9310                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
9311                                 reg_table[i].val, reg_table[i].addr);
9312                         break;
9313                 }
9314                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
9315                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
9316         }
9317 }
9318
9319 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
9320 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
9321 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
9322 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
9323 static int
9324 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
9325 {
9326         uint32_t reg;
9327         int ret;
9328
9329         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
9330                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
9331                 return -EINVAL;
9332         }
9333
9334         /* Configure for double VLAN RX stripping */
9335         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
9336         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
9337                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
9338                 ret = i40e_aq_debug_write_register(hw,
9339                                                    I40E_VSI_TSR(vsi->vsi_id),
9340                                                    reg, NULL);
9341                 if (ret < 0) {
9342                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
9343                                     vsi->vsi_id);
9344                         return I40E_ERR_CONFIG;
9345                 }
9346         }
9347
9348         /* Configure for double VLAN TX insertion */
9349         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
9350         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
9351                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
9352                 ret = i40e_aq_debug_write_register(hw,
9353                                                    I40E_VSI_L2TAGSTXVALID(
9354                                                    vsi->vsi_id), reg, NULL);
9355                 if (ret < 0) {
9356                         PMD_DRV_LOG(ERR,
9357                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
9358                                 vsi->vsi_id);
9359                         return I40E_ERR_CONFIG;
9360                 }
9361         }
9362
9363         return 0;
9364 }
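/*
 * Sketch of the application-side trigger (assumes the DPDK 17.x ethdev
 * API; port_id is a configured i40e port): enabling extended (QinQ) VLAN
 * offload is what leads the PMD to apply the VSI settings above.
 */
#if 0	/* example only, not compiled with the driver */
	/* at configure time ... */
	struct rte_eth_conf conf = { .rxmode = { .hw_vlan_extend = 1 } };

	/* ... or at runtime, preserving the other VLAN offload bits */
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	mask |= ETH_VLAN_EXTEND_OFFLOAD;
	rte_eth_dev_set_vlan_offload(port_id, mask);
#endif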
9365
9366 /**
9367  * i40e_aq_add_mirror_rule
9368  * @hw: pointer to the hardware structure
9369  * @seid: VEB seid to add the mirror rule to
9370  * @dst_id: destination VSI seid
9371  * @rule_type: type of the mirror rule to add
9372  * @entries: buffer which contains the entities to be mirrored
9373  * @count: number of entities contained in the buffer
9374  * @rule_id: the rule_id of the rule to be added
9375  *
9376  * Add a mirror rule for a given VEB.
9377  **/
9378 static enum i40e_status_code
9379 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
9380                         uint16_t seid, uint16_t dst_id,
9381                         uint16_t rule_type, uint16_t *entries,
9382                         uint16_t count, uint16_t *rule_id)
9383 {
9384         struct i40e_aq_desc desc;
9385         struct i40e_aqc_add_delete_mirror_rule cmd;
9386         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
9387                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
9388                 &desc.params.raw;
9389         uint16_t buff_len;
9390         enum i40e_status_code status;
9391
9392         i40e_fill_default_direct_cmd_desc(&desc,
9393                                           i40e_aqc_opc_add_mirror_rule);
9394         memset(&cmd, 0, sizeof(cmd));
9395
9396         buff_len = sizeof(uint16_t) * count;
9397         desc.datalen = rte_cpu_to_le_16(buff_len);
9398         if (buff_len > 0)
9399                 desc.flags |= rte_cpu_to_le_16(
9400                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
9401         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
9402                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
9403         cmd.num_entries = rte_cpu_to_le_16(count);
9404         cmd.seid = rte_cpu_to_le_16(seid);
9405         cmd.destination = rte_cpu_to_le_16(dst_id);
9406
9407         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
9408         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
9409         PMD_DRV_LOG(INFO,
9410                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
9411                 hw->aq.asq_last_status, resp->rule_id,
9412                 resp->mirror_rules_used, resp->mirror_rules_free);
9413         *rule_id = rte_le_to_cpu_16(resp->rule_id);
9414
9415         return status;
9416 }
9417
9418 /**
9419  * i40e_aq_del_mirror_rule
9420  * @hw: pointer to the hardware structure
9421  * @seid: VEB seid to delete the mirror rule from
9422  * @rule_type: type of the mirror rule to delete
9423  * @entries: buffer of VLAN entries (only used for VLAN-type rules)
9424  * @count: number of entities contained in the buffer
9425  * @rule_id: the rule_id of the rule to be deleted
9426  *
9427  * Delete a mirror rule for a given VEB.
9428  **/
9429 static enum i40e_status_code
9430 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
9431                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
9432                 uint16_t count, uint16_t rule_id)
9433 {
9434         struct i40e_aq_desc desc;
9435         struct i40e_aqc_add_delete_mirror_rule cmd;
9436         uint16_t buff_len = 0;
9437         enum i40e_status_code status;
9438         void *buff = NULL;
9439
9440         i40e_fill_default_direct_cmd_desc(&desc,
9441                                           i40e_aqc_opc_delete_mirror_rule);
9442         memset(&cmd, 0, sizeof(cmd));
9443         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
9444                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
9445                                                           I40E_AQ_FLAG_RD));
9446                 cmd.num_entries = count;
9447                 buff_len = sizeof(uint16_t) * count;
9448                 desc.datalen = rte_cpu_to_le_16(buff_len);
9449                 buff = (void *)entries;
9450         } else
9451                 /* rule id is filled in destination field for deleting mirror rule */
9452                 cmd.destination = rte_cpu_to_le_16(rule_id);
9453
9454         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
9455                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
9456         cmd.seid = rte_cpu_to_le_16(seid);
9457
9458         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
9459         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
9460
9461         return status;
9462 }
9463
9464 /**
9465  * i40e_mirror_rule_set
9466  * @dev: pointer to the Ethernet device structure
9467  * @mirror_conf: mirror rule info
9468  * @sw_id: mirror rule's sw_id
9469  * @on: enable/disable
9470  *
9471  * Set a mirror rule.
9472  *
9473  **/
9474 static int
9475 i40e_mirror_rule_set(struct rte_eth_dev *dev,
9476                         struct rte_eth_mirror_conf *mirror_conf,
9477                         uint8_t sw_id, uint8_t on)
9478 {
9479         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9480         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9481         struct i40e_mirror_rule *it, *mirr_rule = NULL;
9482         struct i40e_mirror_rule *parent = NULL;
9483         uint16_t seid, dst_seid, rule_id;
9484         uint16_t i, j = 0;
9485         int ret;
9486
9487         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
9488
9489         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
9490                 PMD_DRV_LOG(ERR,
9491                         "mirror rule cannot be configured without VEB or VFs.");
9492                 return -ENOSYS;
9493         }
9494         if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
9495                 PMD_DRV_LOG(ERR, "mirror table is full.");
9496                 return -ENOSPC;
9497         }
9498         if (mirror_conf->dst_pool > pf->vf_num) {
9499                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
9500                                  mirror_conf->dst_pool);
9501                 return -EINVAL;
9502         }
9503
9504         seid = pf->main_vsi->veb->seid;
9505
9506         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
9507                 if (sw_id <= it->index) {
9508                         mirr_rule = it;
9509                         break;
9510                 }
9511                 parent = it;
9512         }
9513         if (mirr_rule && sw_id == mirr_rule->index) {
9514                 if (on) {
9515                         PMD_DRV_LOG(ERR, "mirror rule exists.");
9516                         return -EEXIST;
9517                 } else {
9518                         ret = i40e_aq_del_mirror_rule(hw, seid,
9519                                         mirr_rule->rule_type,
9520                                         mirr_rule->entries,
9521                                         mirr_rule->num_entries, mirr_rule->id);
9522                         if (ret < 0) {
9523                                 PMD_DRV_LOG(ERR,
9524                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
9525                                         ret, hw->aq.asq_last_status);
9526                                 return -ENOSYS;
9527                         }
9528                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
9529                         rte_free(mirr_rule);
9530                         pf->nb_mirror_rule--;
9531                         return 0;
9532                 }
9533         } else if (!on) {
9534                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
9535                 return -ENOENT;
9536         }
9537
9538         mirr_rule = rte_zmalloc("i40e_mirror_rule",
9539                                 sizeof(struct i40e_mirror_rule), 0);
9540         if (!mirr_rule) {
9541                 PMD_DRV_LOG(ERR, "failed to allocate memory");
9542                 return I40E_ERR_NO_MEMORY;
9543         }
9544         switch (mirror_conf->rule_type) {
9545         case ETH_MIRROR_VLAN:
9546                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
9547                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
9548                                 mirr_rule->entries[j] =
9549                                         mirror_conf->vlan.vlan_id[i];
9550                                 j++;
9551                         }
9552                 }
9553                 if (j == 0) {
9554                         PMD_DRV_LOG(ERR, "vlan is not specified.");
9555                         rte_free(mirr_rule);
9556                         return -EINVAL;
9557                 }
9558                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
9559                 break;
9560         case ETH_MIRROR_VIRTUAL_POOL_UP:
9561         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
9562                 /* check if the specified pool bit is out of range */
9563                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
9564                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
9565                         rte_free(mirr_rule);
9566                         return -EINVAL;
9567                 }
9568                 for (i = 0, j = 0; i < pf->vf_num; i++) {
9569                         if (mirror_conf->pool_mask & (1ULL << i)) {
9570                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
9571                                 j++;
9572                         }
9573                 }
9574                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
9575                         /* add pf vsi to entries */
9576                         mirr_rule->entries[j] = pf->main_vsi_seid;
9577                         j++;
9578                 }
9579                 if (j == 0) {
9580                         PMD_DRV_LOG(ERR, "pool is not specified.");
9581                         rte_free(mirr_rule);
9582                         return -EINVAL;
9583                 }
9584                 /* in AQ commands, egress/ingress are relative to the switch, not the port */
9585                 mirr_rule->rule_type =
9586                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
9587                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
9588                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
9589                 break;
9590         case ETH_MIRROR_UPLINK_PORT:
9591                 /* in AQ commands, egress/ingress are relative to the switch, not the port */
9592                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
9593                 break;
9594         case ETH_MIRROR_DOWNLINK_PORT:
9595                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
9596                 break;
9597         default:
9598                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
9599                         mirror_conf->rule_type);
9600                 rte_free(mirr_rule);
9601                 return -EINVAL;
9602         }
9603
9604         /* If the dst_pool is equal to vf_num, consider it as PF */
9605         if (mirror_conf->dst_pool == pf->vf_num)
9606                 dst_seid = pf->main_vsi_seid;
9607         else
9608                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
9609
9610         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
9611                                       mirr_rule->rule_type, mirr_rule->entries,
9612                                       j, &rule_id);
9613         if (ret < 0) {
9614                 PMD_DRV_LOG(ERR,
9615                         "failed to add mirror rule: ret = %d, aq_err = %d.",
9616                         ret, hw->aq.asq_last_status);
9617                 rte_free(mirr_rule);
9618                 return -ENOSYS;
9619         }
9620
9621         mirr_rule->index = sw_id;
9622         mirr_rule->num_entries = j;
9623         mirr_rule->id = rule_id;
9624         mirr_rule->dst_vsi_seid = dst_seid;
9625
9626         if (parent)
9627                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
9628         else
9629                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
9630
9631         pf->nb_mirror_rule++;
9632         return 0;
9633 }
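/*
 * Usage sketch (assumes the DPDK 17.x ethdev API; port_id and the VLAN and
 * pool values are illustrative): this is how an application would install
 * the VLAN-type rule handled above.
 */
#if 0	/* example only, not compiled with the driver */
	struct rte_eth_mirror_conf mc = { 0 };

	mc.rule_type = ETH_MIRROR_VLAN;
	mc.dst_pool = 0;		/* mirror into VF pool 0 */
	mc.vlan.vlan_mask = 0x1;	/* entry 0 of vlan_id[] is valid */
	mc.vlan.vlan_id[0] = 100;	/* mirror traffic of VLAN 100 */
	/* sw rule id 0, on = 1 -> ends up in i40e_mirror_rule_set() */
	rte_eth_mirror_rule_set(port_id, &mc, 0, 1);
#endif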
9634
9635 /**
9636  * i40e_mirror_rule_reset
9637  * @dev: pointer to the device
9638  * @sw_id: mirror rule's sw_id
9639  *
9640  * Reset (remove) a mirror rule.
9641  *
9642  **/
9643 static int
9644 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
9645 {
9646         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9647         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9648         struct i40e_mirror_rule *it, *mirr_rule = NULL;
9649         uint16_t seid;
9650         int ret;
9651
9652         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
9653
9654         seid = pf->main_vsi->veb->seid;
9655
9656         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
9657                 if (sw_id == it->index) {
9658                         mirr_rule = it;
9659                         break;
9660                 }
9661         }
9662         if (mirr_rule) {
9663                 ret = i40e_aq_del_mirror_rule(hw, seid,
9664                                 mirr_rule->rule_type,
9665                                 mirr_rule->entries,
9666                                 mirr_rule->num_entries, mirr_rule->id);
9667                 if (ret < 0) {
9668                         PMD_DRV_LOG(ERR,
9669                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
9670                                 ret, hw->aq.asq_last_status);
9671                         return -ENOSYS;
9672                 }
9673                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
9674                 rte_free(mirr_rule);
9675                 pf->nb_mirror_rule--;
9676         } else {
9677                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
9678                 return -ENOENT;
9679         }
9680         return 0;
9681 }
9682
9683 static uint64_t
9684 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
9685 {
9686         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9687         uint64_t systim_cycles;
9688
9689         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
9690         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
9691                         << 32;
9692
9693         return systim_cycles;
9694 }
9695
9696 static uint64_t
9697 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
9698 {
9699         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9700         uint64_t rx_tstamp;
9701
9702         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
9703         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
9704                         << 32;
9705
9706         return rx_tstamp;
9707 }
9708
9709 static uint64_t
9710 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
9711 {
9712         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9713         uint64_t tx_tstamp;
9714
9715         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
9716         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
9717                         << 32;
9718
9719         return tx_tstamp;
9720 }
9721
9722 static void
9723 i40e_start_timecounters(struct rte_eth_dev *dev)
9724 {
9725         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9726         struct i40e_adapter *adapter =
9727                         (struct i40e_adapter *)dev->data->dev_private;
9728         struct rte_eth_link link;
9729         uint32_t tsync_inc_l;
9730         uint32_t tsync_inc_h;
9731
9732         /* Get current link speed. */
9733         memset(&link, 0, sizeof(link));
9734         i40e_dev_link_update(dev, 1);
9735         rte_i40e_dev_atomic_read_link_status(dev, &link);
9736
9737         switch (link.link_speed) {
9738         case ETH_SPEED_NUM_40G:
9739                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
9740                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
9741                 break;
9742         case ETH_SPEED_NUM_10G:
9743                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
9744                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
9745                 break;
9746         case ETH_SPEED_NUM_1G:
9747                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
9748                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
9749                 break;
9750         default:
9751                 tsync_inc_l = 0x0;
9752                 tsync_inc_h = 0x0;
9753         }
9754
9755         /* Set the timesync increment value. */
9756         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
9757         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
9758
9759         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
9760         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
9761         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
9762
9763         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
9764         adapter->systime_tc.cc_shift = 0;
9765         adapter->systime_tc.nsec_mask = 0;
9766
9767         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
9768         adapter->rx_tstamp_tc.cc_shift = 0;
9769         adapter->rx_tstamp_tc.nsec_mask = 0;
9770
9771         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
9772         adapter->tx_tstamp_tc.cc_shift = 0;
9773         adapter->tx_tstamp_tc.nsec_mask = 0;
9774 }
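/*
 * Note (an interpretation of the code above): the per-speed INCVAL makes
 * the hardware SYSTIME counter advance in nanosecond units, so the three
 * timecounters can use cc_shift = 0 and a full-width cc_mask -- no binary
 * scaling is needed when rte_timecounter_update() later converts counter
 * deltas to nanoseconds.
 */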
9775
9776 static int
9777 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
9778 {
9779         struct i40e_adapter *adapter =
9780                         (struct i40e_adapter *)dev->data->dev_private;
9781
9782         adapter->systime_tc.nsec += delta;
9783         adapter->rx_tstamp_tc.nsec += delta;
9784         adapter->tx_tstamp_tc.nsec += delta;
9785
9786         return 0;
9787 }
9788
9789 static int
9790 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
9791 {
9792         uint64_t ns;
9793         struct i40e_adapter *adapter =
9794                         (struct i40e_adapter *)dev->data->dev_private;
9795
9796         ns = rte_timespec_to_ns(ts);
9797
9798         /* Set the timecounters to a new value. */
9799         adapter->systime_tc.nsec = ns;
9800         adapter->rx_tstamp_tc.nsec = ns;
9801         adapter->tx_tstamp_tc.nsec = ns;
9802
9803         return 0;
9804 }
9805
9806 static int
9807 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
9808 {
9809         uint64_t ns, systime_cycles;
9810         struct i40e_adapter *adapter =
9811                         (struct i40e_adapter *)dev->data->dev_private;
9812
9813         systime_cycles = i40e_read_systime_cyclecounter(dev);
9814         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
9815         *ts = rte_ns_to_timespec(ns);
9816
9817         return 0;
9818 }
9819
9820 static int
9821 i40e_timesync_enable(struct rte_eth_dev *dev)
9822 {
9823         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9824         uint32_t tsync_ctl_l;
9825         uint32_t tsync_ctl_h;
9826
9827         /* Stop the timesync system time. */
9828         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
9829         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
9830         /* Reset the timesync system time value. */
9831         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
9832         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
9833
9834         i40e_start_timecounters(dev);
9835
9836         /* Clear timesync registers. */
9837         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
9838         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
9839         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
9840         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
9841         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
9842         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
9843
9844         /* Enable timestamping of PTP packets. */
9845         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
9846         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
9847
9848         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
9849         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
9850         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
9851
9852         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
9853         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
9854
9855         return 0;
9856 }
9857
9858 static int
9859 i40e_timesync_disable(struct rte_eth_dev *dev)
9860 {
9861         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9862         uint32_t tsync_ctl_l;
9863         uint32_t tsync_ctl_h;
9864
9865         /* Disable timestamping of transmitted PTP packets. */
9866         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
9867         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
9868
9869         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
9870         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
9871
9872         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
9873         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
9874
9875         /* Reset the timesync increment value. */
9876         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
9877         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
9878
9879         return 0;
9880 }
9881
9882 static int
9883 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
9884                                 struct timespec *timestamp, uint32_t flags)
9885 {
9886         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9887         struct i40e_adapter *adapter =
9888                 (struct i40e_adapter *)dev->data->dev_private;
9889
9890         uint32_t sync_status;
9891         uint32_t index = flags & 0x03;
9892         uint64_t rx_tstamp_cycles;
9893         uint64_t ns;
9894
9895         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
9896         if ((sync_status & (1 << index)) == 0)
9897                 return -EINVAL;
9898
9899         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
9900         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
9901         *timestamp = rte_ns_to_timespec(ns);
9902
9903         return 0;
9904 }
9905
9906 static int
9907 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
9908                                 struct timespec *timestamp)
9909 {
9910         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9911         struct i40e_adapter *adapter =
9912                 (struct i40e_adapter *)dev->data->dev_private;
9913
9914         uint32_t sync_status;
9915         uint64_t tx_tstamp_cycles;
9916         uint64_t ns;
9917
9918         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
9919         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
9920                 return -EINVAL;
9921
9922         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
9923         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
9924         *timestamp = rte_ns_to_timespec(ns);
9925
9926         return 0;
9927 }
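/*
 * Usage sketch (assumes the DPDK 17.x ethdev API and an IEEE 1588-aware
 * application; 'idx' would come from mbuf->timesync on a received PTP
 * frame): typical use of the timesync hooks implemented above.
 */
#if 0	/* example only, not compiled with the driver */
	struct timespec ts;

	rte_eth_timesync_enable(port_id);
	/* after receiving a timestamped PTP frame ... */
	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, idx) == 0)
		printf("RX tstamp: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
	/* after transmitting a frame with PKT_TX_IEEE1588_TMST set ... */
	if (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == 0)
		printf("TX tstamp: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
#endif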
9928
9929 /*
9930  * i40e_parse_dcb_configure - parse DCB configuration from the user
9931  * @dev: the device being configured
9932  * @dcb_cfg: pointer to where the parsed result is stored
9933  * @tc_map: bit map of enabled traffic classes
9934  *
9935  * Returns 0 on success, negative value on failure
9936  */
9937 static int
9938 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
9939                          struct i40e_dcbx_config *dcb_cfg,
9940                          uint8_t *tc_map)
9941 {
9942         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
9943         uint8_t i, tc_bw, bw_lf;
9944
9945         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
9946
9947         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
9948         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
9949                 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
9950                 return -EINVAL;
9951         }
9952
9953         /* assume each tc has the same bw */
9954         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
9955         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
9956                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
9957         /* to ensure the sum of tcbw is equal to 100 */
9958         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
9959         for (i = 0; i < bw_lf; i++)
9960                 dcb_cfg->etscfg.tcbwtable[i]++;
9961
9962         /* assume each tc has the same Transmission Selection Algorithm */
9963         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
9964                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
9965
9966         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
9967                 dcb_cfg->etscfg.prioritytable[i] =
9968                                 dcb_rx_conf->dcb_tc[i];
9969
9970         /* FW needs one App to configure HW */
9971         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
9972         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
9973         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
9974         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
9975
9976         if (dcb_rx_conf->nb_tcs == 0)
9977                 *tc_map = 1; /* tc0 only */
9978         else
9979                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
9980
9981         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
9982                 dcb_cfg->pfc.willing = 0;
9983                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
9984                 dcb_cfg->pfc.pfcenable = *tc_map;
9985         }
9986         return 0;
9987 }
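/*
 * Worked example of the bandwidth split above: with nb_tcs = 8,
 * tc_bw = 100 / 8 = 12 and bw_lf = 100 % 8 = 4, so TC0..TC3 each get
 * 13% and TC4..TC7 each get 12%, summing to exactly 100.
 */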
9988
9989
9990 static enum i40e_status_code
9991 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
9992                               struct i40e_aqc_vsi_properties_data *info,
9993                               uint8_t enabled_tcmap)
9994 {
9995         enum i40e_status_code ret;
9996         int i, total_tc = 0;
9997         uint16_t qpnum_per_tc, bsf, qp_idx;
9998         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
9999         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10000         uint16_t used_queues;
10001
10002         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10003         if (ret != I40E_SUCCESS)
10004                 return ret;
10005
10006         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10007                 if (enabled_tcmap & (1 << i))
10008                         total_tc++;
10009         }
10010         if (total_tc == 0)
10011                 total_tc = 1;
10012         vsi->enabled_tc = enabled_tcmap;
10013
10014         /* different VSI types have different numbers of queues assigned */
10015         if (vsi->type == I40E_VSI_MAIN)
10016                 used_queues = dev_data->nb_rx_queues -
10017                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10018         else if (vsi->type == I40E_VSI_VMDQ2)
10019                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10020         else {
10021                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10022                 return I40E_ERR_NO_AVAILABLE_VSI;
10023         }
10024
10025         /* Number of queues per enabled TC */
10026         qpnum_per_tc = used_queues / total_tc;
10027         if (qpnum_per_tc == 0) {
10028                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
10029                 return I40E_ERR_INVALID_QP_ID;
10030         }
10031         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10032                                 I40E_MAX_Q_PER_TC);
10033         bsf = rte_bsf32(qpnum_per_tc);
10034
10035         /**
10036          * Configure TC and queue mapping parameters: for each enabled
10037          * TC, allocate qpnum_per_tc queues to it. A disabled TC is
10038          * served by the default queue.
10039          */
10040         qp_idx = 0;
10041         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10042                 if (vsi->enabled_tc & (1 << i)) {
10043                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10044                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10045                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10046                         qp_idx += qpnum_per_tc;
10047                 } else
10048                         info->tc_mapping[i] = 0;
10049         }
10050
10051         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
10052         if (vsi->type == I40E_VSI_SRIOV) {
10053                 info->mapping_flags |=
10054                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10055                 for (i = 0; i < vsi->nb_qps; i++)
10056                         info->queue_mapping[i] =
10057                                 rte_cpu_to_le_16(vsi->base_queue + i);
10058         } else {
10059                 info->mapping_flags |=
10060                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10061                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10062         }
10063         info->valid_sections |=
10064                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10065
10066         return I40E_SUCCESS;
10067 }
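/*
 * Worked example of the TC/queue mapping computed above (illustrative
 * numbers): with 16 usable queues and 4 enabled TCs, qpnum_per_tc = 4 and
 * bsf = rte_bsf32(4) = 2, so the second enabled TC is encoded as
 *	tc_mapping = (4 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *		     (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 * i.e. queue offset 4 and 2^2 = 4 queues.
 */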
10068
10069 /*
10070  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10071  * @veb: VEB to be configured
10072  * @tc_map: enabled TC bitmap
10073  *
10074  * Returns 0 on success, negative value on failure
10075  */
10076 static enum i40e_status_code
10077 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10078 {
10079         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10080         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10081         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10082         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10083         enum i40e_status_code ret = I40E_SUCCESS;
10084         int i;
10085         uint32_t bw_max;
10086
10087         /* Check if enabled_tc is same as existing or new TCs */
10088         if (veb->enabled_tc == tc_map)
10089                 return ret;
10090
10091         /* configure tc bandwidth */
10092         memset(&veb_bw, 0, sizeof(veb_bw));
10093         veb_bw.tc_valid_bits = tc_map;
10094         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10095         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10096                 if (tc_map & BIT_ULL(i))
10097                         veb_bw.tc_bw_share_credits[i] = 1;
10098         }
10099         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10100                                                    &veb_bw, NULL);
10101         if (ret) {
10102                 PMD_INIT_LOG(ERR,
10103                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10104                         hw->aq.asq_last_status);
10105                 return ret;
10106         }
10107
10108         memset(&ets_query, 0, sizeof(ets_query));
10109         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10110                                                    &ets_query, NULL);
10111         if (ret != I40E_SUCCESS) {
10112                 PMD_DRV_LOG(ERR,
10113                         "Failed to get switch_comp ETS configuration %u",
10114                         hw->aq.asq_last_status);
10115                 return ret;
10116         }
10117         memset(&bw_query, 0, sizeof(bw_query));
10118         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10119                                                   &bw_query, NULL);
10120         if (ret != I40E_SUCCESS) {
10121                 PMD_DRV_LOG(ERR,
10122                         "Failed to get switch_comp bandwidth configuration %u",
10123                         hw->aq.asq_last_status);
10124                 return ret;
10125         }
10126
10127         /* store and print out BW info */
10128         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10129         veb->bw_info.bw_max = ets_query.tc_bw_max;
10130         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10131         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10132         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10133                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10134                      I40E_16_BIT_WIDTH);
10135         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10136                 veb->bw_info.bw_ets_share_credits[i] =
10137                                 bw_query.tc_bw_share_credits[i];
10138                 veb->bw_info.bw_ets_credits[i] =
10139                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10140                 /* 4 bits per TC, 4th bit is reserved */
10141                 veb->bw_info.bw_ets_max[i] =
10142                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10143                                   RTE_LEN2MASK(3, uint8_t));
10144                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10145                             veb->bw_info.bw_ets_share_credits[i]);
10146                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10147                             veb->bw_info.bw_ets_credits[i]);
10148                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10149                             veb->bw_info.bw_ets_max[i]);
10150         }
10151
10152         veb->enabled_tc = tc_map;
10153
10154         return ret;
10155 }
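/*
 * Note on the unpacking above: bw_query.tc_bw_max[] holds two little-endian
 * 16-bit words that are combined into one 32-bit value with 4 bits per TC
 * (the 4th bit reserved); e.g. TC3's max-credit field is
 * (bw_max >> 12) & 0x7.
 */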
10156
10157
10158 /*
10159  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10160  * @vsi: VSI to be configured
10161  * @tc_map: enabled TC bitmap
10162  *
10163  * Returns 0 on success, negative value on failure
10164  */
10165 static enum i40e_status_code
10166 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10167 {
10168         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10169         struct i40e_vsi_context ctxt;
10170         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10171         enum i40e_status_code ret = I40E_SUCCESS;
10172         int i;
10173
10174         /* Check if enabled_tc is same as existing or new TCs */
10175         if (vsi->enabled_tc == tc_map)
10176                 return ret;
10177
10178         /* configure tc bandwidth */
10179         memset(&bw_data, 0, sizeof(bw_data));
10180         bw_data.tc_valid_bits = tc_map;
10181         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10182         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10183                 if (tc_map & BIT_ULL(i))
10184                         bw_data.tc_bw_credits[i] = 1;
10185         }
10186         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10187         if (ret) {
10188                 PMD_INIT_LOG(ERR,
10189                         "AQ command Config VSI BW allocation per TC failed = %d",
10190                         hw->aq.asq_last_status);
10191                 goto out;
10192         }
10193         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10194                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10195
10196         /* Update Queue Pairs Mapping for currently enabled UPs */
10197         ctxt.seid = vsi->seid;
10198         ctxt.pf_num = hw->pf_id;
10199         ctxt.vf_num = 0;
10200         ctxt.uplink_seid = vsi->uplink_seid;
10201         ctxt.info = vsi->info;
10202         i40e_get_cap(hw);
10203         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10204         if (ret)
10205                 goto out;
10206
10207         /* Update the VSI after updating the VSI queue-mapping information */
10208         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10209         if (ret) {
10210                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10211                         hw->aq.asq_last_status);
10212                 goto out;
10213         }
10214         /* update the local VSI info with updated queue map */
10215         (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10216                                         sizeof(vsi->info.tc_mapping));
10217         (void)rte_memcpy(&vsi->info.queue_mapping,
10218                          &ctxt.info.queue_mapping,
10219                          sizeof(vsi->info.queue_mapping));
10220         vsi->info.mapping_flags = ctxt.info.mapping_flags;
10221         vsi->info.valid_sections = 0;
10222
10223         /* query and update current VSI BW information */
10224         ret = i40e_vsi_get_bw_config(vsi);
10225         if (ret) {
10226                 PMD_INIT_LOG(ERR,
10227                          "Failed updating vsi bw info, err %s aq_err %s",
10228                          i40e_stat_str(hw, ret),
10229                          i40e_aq_str(hw, hw->aq.asq_last_status));
10230                 goto out;
10231         }
10232
10233         vsi->enabled_tc = tc_map;
10234
10235 out:
10236         return ret;
10237 }
10238
10239 /*
10240  * i40e_dcb_hw_configure - program the dcb setting to hw
10241  * @pf: pf the configuration is taken on
10242  * @new_cfg: new configuration
10243  * @tc_map: enabled TC bitmap
10244  *
10245  * Returns 0 on success, negative value on failure
10246  */
10247 static enum i40e_status_code
10248 i40e_dcb_hw_configure(struct i40e_pf *pf,
10249                       struct i40e_dcbx_config *new_cfg,
10250                       uint8_t tc_map)
10251 {
10252         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10253         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
10254         struct i40e_vsi *main_vsi = pf->main_vsi;
10255         struct i40e_vsi_list *vsi_list;
10256         enum i40e_status_code ret;
10257         int i;
10258         uint32_t val;
10259
10260         /* Use the FW API only if FW >= v4.4 */
10261         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
10262               (hw->aq.fw_maj_ver >= 5))) {
10263                 PMD_INIT_LOG(ERR,
10264                         "FW < v4.4, cannot use FW LLDP API to configure DCB");
10265                 return I40E_ERR_FIRMWARE_API_VERSION;
10266         }
10267
10268         /* Check if need reconfiguration */
10269         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
10270                 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
10271                 return I40E_SUCCESS;
10272         }
10273
10274         /* Copy the new config to the current config */
10275         *old_cfg = *new_cfg;
10276         old_cfg->etsrec = old_cfg->etscfg;
10277         ret = i40e_set_dcb_config(hw);
10278         if (ret) {
10279                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
10280                          i40e_stat_str(hw, ret),
10281                          i40e_aq_str(hw, hw->aq.asq_last_status));
10282                 return ret;
10283         }
10284         /* set receive Arbiter to RR mode and ETS scheme by default */
10285         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
10286                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
10287                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
10288                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
10289                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
10290                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
10291                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
10292                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
10293                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
10294                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
10295                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
10296                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
10297                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
10298         }
10299         /* get local mib to check whether it is configured correctly */
10300         /* IEEE mode */
10301         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
10302         /* Get Local DCB Config */
10303         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
10304                                      &hw->local_dcbx_config);
10305
10306         /* if Veb is created, need to update TC of it at first */
10307         if (main_vsi->veb) {
10308                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
10309                 if (ret)
10310                         PMD_INIT_LOG(WARNING,
10311                                  "Failed configuring TC for VEB seid=%d",
10312                                  main_vsi->veb->seid);
10313         }
10314         /* Update each VSI */
10315         i40e_vsi_config_tc(main_vsi, tc_map);
10316         if (main_vsi->veb) {
10317                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
10318                         /* Besides the main VSI and VMDQ VSIs, only enable
10319                          * the default TC for other VSIs
10320                          */
10321                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
10322                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10323                                                          tc_map);
10324                         else
10325                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10326                                                          I40E_DEFAULT_TCMAP);
10327                         if (ret)
10328                                 PMD_INIT_LOG(WARNING,
10329                                         "Failed configuring TC for VSI seid=%d",
10330                                         vsi_list->vsi->seid);
10331                         /* continue */
10332                 }
10333         }
10334         return I40E_SUCCESS;
10335 }
10336
10337 /*
10338  * i40e_dcb_init_configure - initial dcb config
10339  * @dev: device being configured
10340  * @sw_dcb: indicate whether dcb is sw configured or hw offload
10341  *
10342  * Returns 0 on success, negative value on failure
10343  */
10344 static int
10345 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
10346 {
10347         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10348         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10349         int i, ret = 0;
10350
10351         if ((pf->flags & I40E_FLAG_DCB) == 0) {
10352                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10353                 return -ENOTSUP;
10354         }
10355
10356         /* DCB initialization:
10357          * Update DCB configuration from the Firmware and configure
10358          * LLDP MIB change event.
10359          */
10360         if (sw_dcb == TRUE) {
10361                 ret = i40e_init_dcb(hw);
                /* If the LLDP agent is stopped, i40e_init_dcb is expected
                 * to fail with adminq status I40E_AQ_RC_EPERM; otherwise
                 * it should return success.
10365                  */
                if (ret == I40E_SUCCESS ||
                    hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
10368                         memset(&hw->local_dcbx_config, 0,
10369                                 sizeof(struct i40e_dcbx_config));
10370                         /* set dcb default configuration */
10371                         hw->local_dcbx_config.etscfg.willing = 0;
10372                         hw->local_dcbx_config.etscfg.maxtcs = 0;
10373                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
10374                         hw->local_dcbx_config.etscfg.tsatable[0] =
10375                                                 I40E_IEEE_TSA_ETS;
10376                         /* all UPs mapping to TC0 */
10377                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10378                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
10379                         hw->local_dcbx_config.etsrec =
10380                                 hw->local_dcbx_config.etscfg;
10381                         hw->local_dcbx_config.pfc.willing = 0;
10382                         hw->local_dcbx_config.pfc.pfccap =
10383                                                 I40E_MAX_TRAFFIC_CLASS;
10384                         /* FW needs one App to configure HW */
10385                         hw->local_dcbx_config.numapps = 1;
10386                         hw->local_dcbx_config.app[0].selector =
10387                                                 I40E_APP_SEL_ETHTYPE;
10388                         hw->local_dcbx_config.app[0].priority = 3;
10389                         hw->local_dcbx_config.app[0].protocolid =
10390                                                 I40E_APP_PROTOID_FCOE;
10391                         ret = i40e_set_dcb_config(hw);
10392                         if (ret) {
10393                                 PMD_INIT_LOG(ERR,
                                        "Default DCB configuration failed. err = %d, aq_err = %d.",
10395                                         ret, hw->aq.asq_last_status);
10396                                 return -ENOSYS;
10397                         }
10398                 } else {
10399                         PMD_INIT_LOG(ERR,
                                "DCB initialization in FW failed, err = %d, aq_err = %d.",
10401                                 ret, hw->aq.asq_last_status);
10402                         return -ENOTSUP;
10403                 }
10404         } else {
10405                 ret = i40e_aq_start_lldp(hw, NULL);
10406                 if (ret != I40E_SUCCESS)
10407                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
10408
10409                 ret = i40e_init_dcb(hw);
10410                 if (!ret) {
10411                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
10412                                 PMD_INIT_LOG(ERR,
10413                                         "HW doesn't support DCBX offload.");
10414                                 return -ENOTSUP;
10415                         }
10416                 } else {
10417                         PMD_INIT_LOG(ERR,
10418                                 "DCBX configuration failed, err = %d, aq_err = %d.",
10419                                 ret, hw->aq.asq_last_status);
10420                         return -ENOTSUP;
10421                 }
10422         }
10423         return 0;
10424 }
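
/* Illustrative usage sketch from the application side, not part of this
 * driver. DCB is requested through the rte_eth_conf passed to
 * rte_eth_dev_configure(), which eventually exercises the DCB paths
 * above. port_id, nb_rxq and nb_txq are assumed valid; error handling
 * omitted:
 *
 *     struct rte_eth_conf conf;
 *
 *     memset(&conf, 0, sizeof(conf));
 *     conf.rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
 *     conf.txmode.mq_mode = ETH_MQ_TX_DCB;
 *     conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
 *     conf.tx_adv_conf.dcb_tx_conf.nb_tcs = ETH_4_TCS;
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */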
10425
10426 /*
10427  * i40e_dcb_setup - setup dcb related config
10428  * @dev: device being configured
10429  *
10430  * Returns 0 on success, negative value on failure
10431  */
10432 static int
10433 i40e_dcb_setup(struct rte_eth_dev *dev)
10434 {
10435         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10436         struct i40e_dcbx_config dcb_cfg;
10437         uint8_t tc_map = 0;
10438         int ret = 0;
10439
10440         if ((pf->flags & I40E_FLAG_DCB) == 0) {
10441                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10442                 return -ENOTSUP;
10443         }
10444
10445         if (pf->vf_num != 0)
                PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
10447
10448         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
10449         if (ret) {
10450                 PMD_INIT_LOG(ERR, "invalid dcb config");
10451                 return -EINVAL;
10452         }
10453         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
10454         if (ret) {
                PMD_INIT_LOG(ERR, "DCB hardware configuration failed");
10456                 return -ENOSYS;
10457         }
10458
10459         return 0;
10460 }
10461
10462 static int
10463 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
10464                       struct rte_eth_dcb_info *dcb_info)
10465 {
10466         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10467         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10468         struct i40e_vsi *vsi = pf->main_vsi;
10469         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
10470         uint16_t bsf, tc_mapping;
10471         int i, j = 0;
10472
10473         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
10474                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
10475         else
10476                 dcb_info->nb_tcs = 1;
10477         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10478                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
10479         for (i = 0; i < dcb_info->nb_tcs; i++)
10480                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
10481
10482         /* get queue mapping if vmdq is disabled */
10483         if (!pf->nb_cfg_vmdq_vsi) {
10484                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10485                         if (!(vsi->enabled_tc & (1 << i)))
10486                                 continue;
10487                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
10488                         dcb_info->tc_queue.tc_rxq[j][i].base =
10489                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
10490                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
10491                         dcb_info->tc_queue.tc_txq[j][i].base =
10492                                 dcb_info->tc_queue.tc_rxq[j][i].base;
10493                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
10494                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
10495                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
10496                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
10497                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
10498                 }
10499                 return 0;
10500         }
10501
10502         /* get queue mapping if vmdq is enabled */
10503         do {
10504                 vsi = pf->vmdq[j].vsi;
10505                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10506                         if (!(vsi->enabled_tc & (1 << i)))
10507                                 continue;
10508                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
10509                         dcb_info->tc_queue.tc_rxq[j][i].base =
10510                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
10511                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
10512                         dcb_info->tc_queue.tc_txq[j][i].base =
10513                                 dcb_info->tc_queue.tc_rxq[j][i].base;
10514                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
10515                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
10516                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
10517                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
10518                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
10519                 }
10520                 j++;
10521         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
10522         return 0;
10523 }
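
/* Illustrative usage sketch from the application side, not part of this
 * driver, assuming a valid and configured port_id:
 *
 *     struct rte_eth_dcb_info dcb_info;
 *     uint8_t tc;
 *
 *     if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) == 0) {
 *             tc = dcb_info.prio_tc[0];
 *             printf("prio 0 -> TC%u: %u Rx queue(s) from base %u\n", tc,
 *                    dcb_info.tc_queue.tc_rxq[0][tc].nb_queue,
 *                    dcb_info.tc_queue.tc_rxq[0][tc].base);
 *     }
 */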
10524
10525 static int
10526 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
10527 {
10528         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10529         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
10530         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10531         uint16_t interval =
10532                 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
10533         uint16_t msix_intr;
10534
10535         msix_intr = intr_handle->intr_vec[queue_id];
10536         if (msix_intr == I40E_MISC_VEC_ID)
10537                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
10538                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
10539                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
10540                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
10541                                (interval <<
10542                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
10543         else
10544                 I40E_WRITE_REG(hw,
10545                                I40E_PFINT_DYN_CTLN(msix_intr -
10546                                                    I40E_RX_VEC_START),
10547                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
10548                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
10549                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
10550                                (interval <<
10551                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
10552
10553         I40E_WRITE_FLUSH(hw);
10554         rte_intr_enable(&pci_dev->intr_handle);
10555
10556         return 0;
10557 }
10558
10559 static int
10560 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
10561 {
10562         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10563         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
10564         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10565         uint16_t msix_intr;
10566
10567         msix_intr = intr_handle->intr_vec[queue_id];
10568         if (msix_intr == I40E_MISC_VEC_ID)
10569                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
10570         else
10571                 I40E_WRITE_REG(hw,
10572                                I40E_PFINT_DYN_CTLN(msix_intr -
10573                                                    I40E_RX_VEC_START),
10574                                0);
10575         I40E_WRITE_FLUSH(hw);
10576
10577         return 0;
10578 }
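
/* Illustrative usage sketch from the application side, not part of this
 * driver: the two callbacks above back rte_eth_dev_rx_intr_enable() and
 * rte_eth_dev_rx_intr_disable(). A typical interrupt-mode receive loop,
 * assuming valid port_id/queue_id and omitting error handling:
 *
 *     struct rte_epoll_event ev;
 *     struct rte_mbuf *pkts[32];
 *
 *     rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *                               RTE_INTR_EVENT_ADD, NULL);
 *     for (;;) {
 *             rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *             rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *             rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *             while (rte_eth_rx_burst(port_id, queue_id, pkts, 32) > 0)
 *                     ;
 *     }
 */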
10579
10580 static int i40e_get_regs(struct rte_eth_dev *dev,
10581                          struct rte_dev_reg_info *regs)
10582 {
10583         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10584         uint32_t *ptr_data = regs->data;
10585         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
10586         const struct i40e_reg_info *reg_info;
10587
10588         if (ptr_data == NULL) {
10589                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
10590                 regs->width = sizeof(uint32_t);
10591                 return 0;
10592         }
10593
10594         /* The first few registers have to be read using AQ operations */
10595         reg_idx = 0;
10596         while (i40e_regs_adminq[reg_idx].name) {
10597                 reg_info = &i40e_regs_adminq[reg_idx++];
10598                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
10599                         for (arr_idx2 = 0;
10600                                         arr_idx2 <= reg_info->count2;
10601                                         arr_idx2++) {
10602                                 reg_offset = arr_idx * reg_info->stride1 +
10603                                         arr_idx2 * reg_info->stride2;
10604                                 reg_offset += reg_info->base_addr;
10605                                 ptr_data[reg_offset >> 2] =
10606                                         i40e_read_rx_ctl(hw, reg_offset);
10607                         }
10608         }
10609
10610         /* The remaining registers can be read using primitives */
10611         reg_idx = 0;
10612         while (i40e_regs_others[reg_idx].name) {
10613                 reg_info = &i40e_regs_others[reg_idx++];
10614                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
10615                         for (arr_idx2 = 0;
10616                                         arr_idx2 <= reg_info->count2;
10617                                         arr_idx2++) {
10618                                 reg_offset = arr_idx * reg_info->stride1 +
10619                                         arr_idx2 * reg_info->stride2;
10620                                 reg_offset += reg_info->base_addr;
10621                                 ptr_data[reg_offset >> 2] =
10622                                         I40E_READ_REG(hw, reg_offset);
10623                         }
10624         }
10625
10626         return 0;
10627 }
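
/* Illustrative usage sketch from the application side, not part of this
 * driver: rte_eth_dev_get_reg_info() is called twice, first with
 * data == NULL so that the callback above only reports length and width,
 * then again with a buffer of that size. port_id is assumed valid;
 * error handling omitted:
 *
 *     struct rte_dev_reg_info regs;
 *
 *     memset(&regs, 0, sizeof(regs));
 *     rte_eth_dev_get_reg_info(port_id, &regs);
 *     regs.data = calloc(regs.length, regs.width);
 *     rte_eth_dev_get_reg_info(port_id, &regs);
 */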
10628
10629 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
10630 {
10631         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10632
10633         /* Convert word count to byte count */
10634         return hw->nvm.sr_size << 1;
10635 }
10636
10637 static int i40e_get_eeprom(struct rte_eth_dev *dev,
10638                            struct rte_dev_eeprom_info *eeprom)
10639 {
10640         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10641         uint16_t *data = eeprom->data;
10642         uint16_t offset, length, cnt_words;
10643         int ret_code;
10644
10645         offset = eeprom->offset >> 1;
10646         length = eeprom->length >> 1;
10647         cnt_words = length;
10648
10649         if (offset > hw->nvm.sr_size ||
10650                 offset + length > hw->nvm.sr_size) {
10651                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
10652                 return -EINVAL;
10653         }
10654
10655         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
10656
10657         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
10658         if (ret_code != I40E_SUCCESS || cnt_words != length) {
10659                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
10660                 return -EIO;
10661         }
10662
10663         return 0;
10664 }
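
/* Illustrative usage sketch from the application side, not part of this
 * driver, assuming a valid port_id and omitting error handling:
 *
 *     struct rte_dev_eeprom_info info;
 *     int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *     memset(&info, 0, sizeof(info));
 *     info.offset = 0;
 *     info.length = len;
 *     info.data = malloc(len);
 *     rte_eth_dev_get_eeprom(port_id, &info);
 */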
10665
10666 static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
10667                                       struct ether_addr *mac_addr)
10668 {
10669         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10670
10671         if (!is_valid_assigned_ether_addr(mac_addr)) {
10672                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
10673                 return;
10674         }
10675
10676         /* Flags: 0x3 updates port address */
10677         i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
10678 }
10679
10680 static int
10681 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
10682 {
10683         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10684         struct rte_eth_dev_data *dev_data = pf->dev_data;
10685         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
10686         int ret = 0;
10687
10688         /* check if mtu is within the allowed range */
10689         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
10690                 return -EINVAL;
10691
        /* Setting the MTU is forbidden while the port is started */
10693         if (dev_data->dev_started) {
10694                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
10695                             dev_data->port_id);
10696                 return -EBUSY;
10697         }
10698
10699         if (frame_size > ETHER_MAX_LEN)
10700                 dev_data->dev_conf.rxmode.jumbo_frame = 1;
10701         else
10702                 dev_data->dev_conf.rxmode.jumbo_frame = 0;
10703
10704         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
10705
10706         return ret;
10707 }
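
/* Illustrative usage sketch from the application side, not part of this
 * driver: as the callback above enforces, the port must be stopped
 * before the MTU can change. port_id assumed valid; error handling
 * omitted:
 *
 *     rte_eth_dev_stop(port_id);
 *     rte_eth_dev_set_mtu(port_id, 9000);
 *     rte_eth_dev_start(port_id);
 */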
10708
10709 /* Restore ethertype filter */
10710 static void
10711 i40e_ethertype_filter_restore(struct i40e_pf *pf)
10712 {
10713         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10714         struct i40e_ethertype_filter_list
10715                 *ethertype_list = &pf->ethertype.ethertype_list;
10716         struct i40e_ethertype_filter *f;
        /* Zero-initialize so the log below is sane even with no filters */
        struct i40e_control_filter_stats stats = { 0 };
10718         uint16_t flags;
10719
10720         TAILQ_FOREACH(f, ethertype_list, rules) {
10721                 flags = 0;
10722                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
10723                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
10724                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
10725                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
10726                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
10727
10728                 memset(&stats, 0, sizeof(stats));
10729                 i40e_aq_add_rem_control_packet_filter(hw,
10730                                             f->input.mac_addr.addr_bytes,
10731                                             f->input.ether_type,
10732                                             flags, pf->main_vsi->seid,
10733                                             f->queue, 1, &stats, NULL);
10734         }
10735         PMD_DRV_LOG(INFO, "Ethertype filter:"
10736                     " mac_etype_used = %u, etype_used = %u,"
10737                     " mac_etype_free = %u, etype_free = %u",
10738                     stats.mac_etype_used, stats.etype_used,
10739                     stats.mac_etype_free, stats.etype_free);
10740 }
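
/* Illustrative usage sketch from the application side, not part of this
 * driver: filters like the ones restored above are installed through the
 * filter-control API from rte_eth_ctrl.h. The ethertype and queue below
 * are arbitrary example values; port_id assumed valid, error handling
 * omitted:
 *
 *     struct rte_eth_ethertype_filter filter;
 *
 *     memset(&filter, 0, sizeof(filter));
 *     filter.ether_type = 0x88F7;
 *     filter.flags = 0;
 *     filter.queue = 4;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *                             RTE_ETH_FILTER_ADD, &filter);
 */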
10741
10742 /* Restore tunnel filter */
10743 static void
10744 i40e_tunnel_filter_restore(struct i40e_pf *pf)
10745 {
10746         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10747         struct i40e_vsi *vsi;
10748         struct i40e_pf_vf *vf;
10749         struct i40e_tunnel_filter_list
10750                 *tunnel_list = &pf->tunnel.tunnel_list;
10751         struct i40e_tunnel_filter *f;
10752         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
10753         bool big_buffer = 0;
10754
        TAILQ_FOREACH(f, tunnel_list, rules) {
                /* Reset per filter; otherwise one big-buffer filter would
                 * force every following filter down the big-buffer path.
                 */
                big_buffer = 0;
                if (!f->is_to_vf)
10757                         vsi = pf->main_vsi;
10758                 else {
10759                         vf = &pf->vfs[f->vf_id];
10760                         vsi = vf->vsi;
10761                 }
10762                 memset(&cld_filter, 0, sizeof(cld_filter));
10763                 ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
10764                         (struct ether_addr *)&cld_filter.element.outer_mac);
10765                 ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
10766                         (struct ether_addr *)&cld_filter.element.inner_mac);
10767                 cld_filter.element.inner_vlan = f->input.inner_vlan;
10768                 cld_filter.element.flags = f->input.flags;
10769                 cld_filter.element.tenant_id = f->input.tenant_id;
10770                 cld_filter.element.queue_number = f->queue;
10771                 rte_memcpy(cld_filter.general_fields,
10772                            f->input.general_fields,
10773                            sizeof(f->input.general_fields));
10774
10775                 if (((f->input.flags &
10776                      I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
10777                      I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
10778                     ((f->input.flags &
10779                      I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
10780                      I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
10781                     ((f->input.flags &
10782                      I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
10783                      I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
10784                         big_buffer = 1;
10785
10786                 if (big_buffer)
10787                         i40e_aq_add_cloud_filters_big_buffer(hw,
10788                                              vsi->seid, &cld_filter, 1);
10789                 else
10790                         i40e_aq_add_cloud_filters(hw, vsi->seid,
10791                                                   &cld_filter.element, 1);
10792         }
10793 }
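
/* Illustrative usage sketch from the application side, not part of this
 * driver: tunnel filters such as the ones restored above are added
 * through the same filter-control API. Tenant id and queue are arbitrary
 * example values; inner_mac would normally be filled in as well. port_id
 * assumed valid, error handling omitted:
 *
 *     struct rte_eth_tunnel_filter_conf conf;
 *
 *     memset(&conf, 0, sizeof(conf));
 *     conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
 *     conf.filter_type = ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID;
 *     conf.tenant_id = 100;
 *     conf.queue_id = 2;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
 *                             RTE_ETH_FILTER_ADD, &conf);
 */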
10794
10795 static void
10796 i40e_filter_restore(struct i40e_pf *pf)
10797 {
10798         i40e_ethertype_filter_restore(pf);
10799         i40e_tunnel_filter_restore(pf);
10800         i40e_fdir_filter_restore(pf);
10801 }
10802
10803 static bool
10804 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
10805 {
10806         if (strcmp(dev->device->driver->name, drv->driver.name))
10807                 return false;
10808
10809         return true;
10810 }
10811
10812 bool
10813 is_i40e_supported(struct rte_eth_dev *dev)
10814 {
10815         return is_device_supported(dev, &rte_i40e_pmd);
10816 }
10817
10818 /* Create a QinQ cloud filter
10819  *
10820  * The Fortville NIC has limited resources for tunnel filters,
10821  * so we can only reuse existing filters.
10822  *
10823  * In step 1 we define which Field Vector fields can be used for
10824  * filter types.
 * As the inner tag is not defined as a field, we have to define
 * it first by reusing one of the L1 entries.
 *
 * In step 2 we replace one of the existing filter types with a
 * new one for QinQ.
 * Because we reuse an L1 entry and replace an L2 entry, some of
 * the default filter types will disappear, depending on which L1
 * and L2 entries we reuse.
10832  *
10833  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
10834  *
10835  * 1.   Create L1 filter of outer vlan (12b) which will be in use
10836  *              later when we define the cloud filter.
10837  *      a.      Valid_flags.replace_cloud = 0
10838  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
10839  *      c.      New_filter = 0x10
10840  *      d.      TR bit = 0xff (optional, not used here)
10841  *      e.      Buffer – 2 entries:
10842  *              i.      Byte 0 = 8 (outer vlan FV index).
10843  *                      Byte 1 = 0 (rsv)
10844  *                      Byte 2-3 = 0x0fff
10845  *              ii.     Byte 0 = 37 (inner vlan FV index).
 *                      Byte 1 = 0 (rsv)
10847  *                      Byte 2-3 = 0x0fff
10848  *
10849  * Step 2:
10850  * 2.   Create cloud filter using two L1 filters entries: stag and
10851  *              new filter(outer vlan+ inner vlan)
10852  *      a.      Valid_flags.replace_cloud = 1
10853  *      b.      Old_filter = 1 (instead of outer IP)
10854  *      c.      New_filter = 0x10
10855  *      d.      Buffer – 2 entries:
10856  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
10857  *                      Byte 1-3 = 0 (rsv)
10858  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
10859  *                      Byte 9-11 = 0 (rsv)
10860  */
10861 static int
10862 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
10863 {
10864         int ret = -ENOTSUP;
10865         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
10866         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
10867         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10868
10869         /* Init */
10870         memset(&filter_replace, 0,
10871                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
10872         memset(&filter_replace_buf, 0,
10873                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
10874
10875         /* create L1 filter */
10876         filter_replace.old_filter_type =
10877                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
10878         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
10879         filter_replace.tr_bit = 0;
10880
10881         /* Prepare the buffer, 2 entries */
10882         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
10883         filter_replace_buf.data[0] |=
10884                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
10885         /* Field Vector 12b mask */
10886         filter_replace_buf.data[2] = 0xff;
10887         filter_replace_buf.data[3] = 0x0f;
10888         filter_replace_buf.data[4] =
10889                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
10890         filter_replace_buf.data[4] |=
10891                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
10892         /* Field Vector 12b mask */
10893         filter_replace_buf.data[6] = 0xff;
10894         filter_replace_buf.data[7] = 0x0f;
10895         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
10896                         &filter_replace_buf);
10897         if (ret != I40E_SUCCESS)
10898                 return ret;
10899
10900         /* Apply the second L2 cloud filter */
10901         memset(&filter_replace, 0,
10902                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
10903         memset(&filter_replace_buf, 0,
10904                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
10905
10906         /* create L2 filter, input for L2 filter will be L1 filter  */
10907         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
10908         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
10909         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
10910
10911         /* Prepare the buffer, 2 entries */
10912         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
10913         filter_replace_buf.data[0] |=
10914                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
10915         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
10916         filter_replace_buf.data[4] |=
10917                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
10918         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
10919                         &filter_replace_buf);
10920         return ret;
10921 }
10922
10923 RTE_INIT(i40e_init_log);
10924 static void
10925 i40e_init_log(void)
10926 {
10927         i40e_logtype_init = rte_log_register("pmd.i40e.init");
10928         if (i40e_logtype_init >= 0)
10929                 rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
10930         i40e_logtype_driver = rte_log_register("pmd.i40e.driver");
10931         if (i40e_logtype_driver >= 0)
10932                 rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
10933 }