/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"

#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000
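
/*
 * Illustrative sketch (not part of the driver): the two constants above
 * are meant for a poll loop of roughly this shape, giving a worst-case
 * wait of I40E_CHK_Q_ENA_COUNT * I40E_CHK_Q_ENA_INTERVAL_US = 1 second
 * for a queue enable/disable to take effect:
 *
 *	for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
 *		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
 *		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
 *		if (!!(reg & I40E_QTX_ENA_QENA_STAT_MASK) == on)
 *			break;
 *	}
 *
 * Register and mask names here are illustrative; the real polling lives
 * in the queue start/stop helpers.
 */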

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
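
/*
 * Note: 0xF2000 is 991232 bytes, so after the kilobyte shift both
 * watermarks default to 968 KB, i.e. the full I40E_RXPBSIZE above.
 */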

/* Receive Average Packet Size in Bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL

/**
 * Below are values for writing to un-exposed registers, as
 * suggested by silicon experts.
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* All 8 words of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
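
/*
 * Illustrative sketch (not compiled here): with the offsets above, the
 * Extended Tag capability could be probed and enabled through the
 * generic PCI config-space accessors from rte_bus_pci.h:
 *
 *	uint32_t cap, ctrl;
 *
 *	if (rte_pci_read_config(pci_dev, &cap, sizeof(cap),
 *				PCI_DEV_CAP_REG) == sizeof(cap) &&
 *	    (cap & PCI_DEV_CAP_EXT_TAG_MASK)) {
 *		rte_pci_read_config(pci_dev, &ctrl, sizeof(ctrl),
 *				    PCI_DEV_CTRL_REG);
 *		ctrl |= PCI_DEV_CTRL_EXT_TAG_MASK;
 *		rte_pci_write_config(pci_dev, &ctrl, sizeof(ctrl),
 *				     PCI_DEV_CTRL_REG);
 *	}
 */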

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static int  i40e_dev_reset(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned limit);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
					    uint16_t queue_id,
					    uint8_t stat_idx,
					    uint8_t is_rx);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
				char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id,
				int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type,
			      uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue,
				      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr,
			    uint32_t index,
			    uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
			       uint32_t hireg,
			       uint32_t loreg,
			       bool offset_loaded,
			       uint64_t *offset,
			       uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
				uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
						struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
				enum rte_filter_type filter_type,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
				  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
						     uint16_t seid,
						     uint16_t rule_type,
						     uint16_t *entries,
						     uint16_t count,
						     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp,
					   uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
				   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
				    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					  uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
			 struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *eeprom);

static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
				      struct ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
	const struct rte_eth_ethertype_filter *input,
	struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
	struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
	struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
				struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

int i40e_logtype_init;
int i40e_logtype_driver;

static const struct rte_pci_id pci_id_i40e_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.dev_reset                    = i40e_dev_reset,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.xstats_get                   = i40e_dev_xstats_get,
	.xstats_get_names             = i40e_dev_xstats_get_names,
	.stats_reset                  = i40e_dev_stats_reset,
	.xstats_reset                 = i40e_dev_stats_reset,
	.queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
	.fw_version_get               = i40e_fw_version_get,
	.dev_infos_get                = i40e_dev_info_get,
	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.rx_queue_count               = i40e_dev_rx_queue_count,
	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
	.rx_descriptor_status         = i40e_dev_rx_descriptor_status,
	.tx_descriptor_status         = i40e_dev_tx_descriptor_status,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_get                = i40e_flow_ctrl_get,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
	.filter_ctrl                  = i40e_dev_filter_ctrl,
	.rxq_info_get                 = i40e_rxq_info_get,
	.txq_info_get                 = i40e_txq_info_get,
	.mirror_rule_set              = i40e_mirror_rule_set,
	.mirror_rule_reset            = i40e_mirror_rule_reset,
	.timesync_enable              = i40e_timesync_enable,
	.timesync_disable             = i40e_timesync_disable,
	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
	.get_dcb_info                 = i40e_dev_get_dcb_info,
	.timesync_adjust_time         = i40e_timesync_adjust_time,
	.timesync_read_time           = i40e_timesync_read_time,
	.timesync_write_time          = i40e_timesync_write_time,
	.get_reg                      = i40e_get_regs,
	.get_eeprom_length            = i40e_get_eeprom_length,
	.get_eeprom                   = i40e_get_eeprom,
	.mac_addr_set                 = i40e_set_default_mac_addr,
	.mtu_set                      = i40e_dev_mtu_set,
	.tm_ops_get                   = i40e_tm_ops_get,
};
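
/*
 * None of the callbacks above is called directly; they are dispatched
 * through the generic ethdev layer. Illustrative example: an
 * application call such as
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * lands in i40e_dev_configure() via dev->dev_ops->dev_configure.
 */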

/* store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
		sizeof(rte_i40e_stats_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
		mac_remote_faults)},
	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
		rx_length_errors)},
	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
		mac_short_packet_dropped)},
	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_big)},
	{"rx_flow_director_atr_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
	{"rx_flow_director_sb_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		tx_lpi_status)},
	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		rx_lpi_status)},
	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		tx_lpi_count)},
	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
		sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_rx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
		sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_tx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_tx)},
	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
		sizeof(rte_i40e_txq_prio_strings[0]))
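
/*
 * The Rx/Tx priority tables above are reported once per traffic class,
 * so assuming the usual 8 TCs the total xstats count comes to roughly:
 *
 *	I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
 *		8 * I40E_NB_RXQ_PRIO_XSTATS + 8 * I40E_NB_TXQ_PRIO_XSTATS
 */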

static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct i40e_adapter), eth_i40e_dev_init);
}

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
	.id_table = pci_id_i40e_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_i40e_pci_probe,
	.remove = eth_i40e_pci_remove,
};

static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				      struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
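
/*
 * The two helpers above exploit the fact that struct rte_eth_link fits
 * in 64 bits: rte_atomic64_cmpset(dst, exp, src) stores src only if
 * *dst still equals exp, so the whole link status is copied in a single
 * atomic operation and a 0 return (reported as -1 here) means the value
 * changed underneath us.
 */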

static inline void
i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
	i40e_write_rx_ctl(hw, reg_addr, reg_val);
	PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
		    "with value 0x%08x",
		    reg_addr, reg_val);
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
	 * Initialize registers for parsing the packet type of QinQ.
	 * This should be removed from the code once a proper
	 * configuration API is added, to avoid configuration conflicts
	 * between ports of the same device.
	 */
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
	i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
}

static inline void i40e_config_automask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	/* The INTENA flag is not auto-cleared on interrupt */
	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;

	/* If multi-driver support is enabled, the PF will use INT0. */
	if (!pf->support_multi_driver)
		val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
	int ret;

	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
				I40E_FLOW_CONTROL_ETHERTYPE, flags,
				pf->main_vsi_seid, 0,
				TRUE, NULL, NULL);
	if (ret)
		PMD_INIT_LOG(ERR,
			"Failed to add filter to drop flow control frames from VSIs.");
}

static int
floating_veb_list_handler(__rte_unused const char *key,
			  const char *floating_veb_value,
			  void *opaque)
{
	int idx = 0;
	unsigned int count = 0;
	char *end = NULL;
	int min, max;
	bool *vf_floating_veb = opaque;

	while (isblank(*floating_veb_value))
		floating_veb_value++;

	/* Reset floating VEB configuration for VFs */
	for (idx = 0; idx < I40E_MAX_VF; idx++)
		vf_floating_veb[idx] = false;

	min = I40E_MAX_VF;
	do {
		while (isblank(*floating_veb_value))
			floating_veb_value++;
		if (*floating_veb_value == '\0')
			return -1;
		errno = 0;
		idx = strtoul(floating_veb_value, &end, 10);
		if (errno || end == NULL)
			return -1;
		while (isblank(*end))
			end++;
		if (*end == '-') {
			min = idx;
		} else if ((*end == ';') || (*end == '\0')) {
			max = idx;
			if (min == I40E_MAX_VF)
				min = idx;
			if (max >= I40E_MAX_VF)
				max = I40E_MAX_VF - 1;
			for (idx = min; idx <= max; idx++) {
				vf_floating_veb[idx] = true;
				count++;
			}
			min = I40E_MAX_VF;
		} else {
			return -1;
		}
		floating_veb_value = end + 1;
	} while (*end != '\0');

	if (count == 0)
		return -1;

	return 0;
}
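
/*
 * Example (assumed syntax, matching the parser above): the list is a
 * semicolon-separated set of VF indices and ranges, so a devargs string
 * like
 *
 *	-w 0000:01:00.0,enable_floating_veb=1,floating_veb_list=1;3-5
 *
 * marks VFs 1, 3, 4 and 5 for the floating VEB and leaves the others on
 * the legacy VEB.
 */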

static void
config_vf_floating_veb(struct rte_devargs *devargs,
		       uint16_t floating_veb,
		       bool *vf_floating_veb)
{
	struct rte_kvargs *kvlist;
	int i;
	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

	if (!floating_veb)
		return;
	/* All the VFs attach to the floating VEB by default
	 * when the floating VEB is enabled.
	 */
	for (i = 0; i < I40E_MAX_VF; i++)
		vf_floating_veb[i] = true;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
		rte_kvargs_free(kvlist);
		return;
	}
	/* When the floating_veb_list parameter exists, all the VFs
	 * first attach to the legacy VEB, and are then moved to the
	 * floating VEB according to the floating_veb_list.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_list,
			       floating_veb_list_handler,
			       vf_floating_veb) < 0) {
		rte_kvargs_free(kvlist);
		return;
	}
	rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
			    const char *value,
			    __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Floating VEB is enabled when there's key-value:
	 * enable_floating_veb=1
	 */
	if (rte_kvargs_process(kvlist, floating_veb_key,
			       i40e_check_floating_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
		pf->floating_veb =
			is_floating_veb_supported(pci_dev->device.devargs);
		config_vf_floating_veb(pci_dev->device.devargs,
				       pf->floating_veb,
				       pf->floating_veb_list);
	} else {
		pf->floating_veb = false;
	}
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	char ethertype_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters ethertype_hash_params = {
		.name = ethertype_hash_name,
		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
		.key_len = sizeof(struct i40e_ethertype_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize ethertype filter rule list and hash */
	TAILQ_INIT(&ethertype_rule->ethertype_list);
	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
		 "ethertype_%s", dev->device->name);
	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
	if (!ethertype_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
		return -EINVAL;
	}
	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
				       sizeof(struct i40e_ethertype_filter *) *
				       I40E_MAX_ETHERTYPE_FILTER_NUM,
				       0);
	if (!ethertype_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for ethertype hash map!");
		ret = -ENOMEM;
		goto err_ethertype_hash_map_alloc;
	}

	return 0;

err_ethertype_hash_map_alloc:
	rte_hash_free(ethertype_rule->hash_table);

	return ret;
}

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	char tunnel_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters tunnel_hash_params = {
		.name = tunnel_hash_name,
		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
		.key_len = sizeof(struct i40e_tunnel_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize tunnel filter rule list and hash */
	TAILQ_INIT(&tunnel_rule->tunnel_list);
	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
		 "tunnel_%s", dev->device->name);
	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
	if (!tunnel_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
		return -EINVAL;
	}
	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
				    sizeof(struct i40e_tunnel_filter *) *
				    I40E_MAX_TUNNEL_FILTER_NUM,
				    0);
	if (!tunnel_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for tunnel hash map!");
		ret = -ENOMEM;
		goto err_tunnel_hash_map_alloc;
	}

	return 0;

err_tunnel_hash_map_alloc:
	rte_hash_free(tunnel_rule->hash_table);

	return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = I40E_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct i40e_fdir_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize flow director filter rule list and hash */
	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
					  sizeof(struct i40e_fdir_filter *) *
					  I40E_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
i40e_init_customized_info(struct i40e_pf *pf)
{
	int i;

	/* Initialize customized pctype */
	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
		pf->customized_pctype[i].index = i;
		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
		pf->customized_pctype[i].valid = false;
	}

	pf->gtp_support = false;
}

void
i40e_init_queue_region_conf(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_queue_regions *info = &pf->queue_region;
	uint16_t i;

	for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);

	memset(info, 0, sizeof(struct i40e_queue_regions));
}

#define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"

static int
i40e_parse_multi_drv_handler(__rte_unused const char *key,
			       const char *value,
			       void *opaque)
{
	struct i40e_pf *pf;
	unsigned long support_multi_driver;
	char *end;

	pf = (struct i40e_pf *)opaque;

	errno = 0;
	support_multi_driver = strtoul(value, &end, 10);
	if (errno != 0 || end == value || *end != 0) {
		PMD_DRV_LOG(WARNING, "Wrong global configuration");
		return -(EINVAL);
	}

	if (support_multi_driver == 1 || support_multi_driver == 0)
		pf->support_multi_driver = (bool)support_multi_driver;
	else
		PMD_DRV_LOG(WARNING,
			    "%s must be 1 or 0, enabling global configuration by default",
			    ETH_I40E_SUPPORT_MULTI_DRIVER);
	return 0;
}

static int
i40e_support_multi_driver(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	static const char *const valid_keys[] = {
		ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
	struct rte_kvargs *kvlist;

	/* Enable global configuration by default */
	pf->support_multi_driver = false;

	if (!dev->device->devargs)
		return 0;

	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;

	if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
		PMD_DRV_LOG(WARNING,
			    "Argument \"%s\" given more than once; the first invalid or last valid occurrence is used!",
			    ETH_I40E_SUPPORT_MULTI_DRIVER);

	rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
			   i40e_parse_multi_drv_handler, pf);
	rte_kvargs_free(kvlist);
	return 0;
}
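
/*
 * Illustrative usage: starting the port with a devargs string such as
 *
 *	-w 0000:01:00.0,support-multi-driver=1
 *
 * makes this PMD skip programming device-global registers, so that it
 * can coexist with another driver instance (e.g. the kernel driver on
 * another port of the same NIC).
 */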

static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi;
	int ret;
	uint32_t len;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &i40e_eth_dev_ops;
	dev->rx_pkt_burst = i40e_recv_pkts;
	dev->tx_pkt_burst = i40e_xmit_pkts;
	dev->tx_pkt_prepare = i40e_prep_pkts;

	/* For secondary processes, we don't initialise any further, as the
	 * primary has already done this work. Only check whether we need a
	 * different Rx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		i40e_set_rx_function(dev);
		i40e_set_tx_function(dev);
		return 0;
	}
	i40e_set_default_ptype_table(dev);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	intr_handle = &pci_dev->intr_handle;

	rte_eth_copy_pci_info(dev, pci_dev);

	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;

	hw->back = I40E_PF_TO_ADAPTER(pf);
	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
	if (!hw->hw_addr) {
		PMD_INIT_LOG(ERR,
			"Hardware is not available, as address is NULL");
		return -ENODEV;
	}

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->adapter_stopped = 0;

	/* Check whether multi-driver support is needed */
	i40e_support_multi_driver(dev);

	/* Make sure all is clean before doing PF reset */
	i40e_clear_hw(hw);

	/* Initialize the hardware */
	i40e_hw_init(dev);

	/* Reset here to make sure all is clean for each PF */
	ret = i40e_pf_reset(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
		return ret;
	}

	/* Initialize the shared code (base driver) */
	ret = i40e_init_shared_code(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
		return ret;
	}

	i40e_config_automask(pf);

	i40e_set_default_pctype_table(dev);

	/*
	 * To work around an NVM issue, initialize the registers
	 * for the QinQ packet type in software.
	 * This should be removed once the issue is fixed in the NVM.
	 */
1201         if (!pf->support_multi_driver)
1202                 i40e_GLQF_reg_init(hw);
1203
1204         /* Initialize the input set for filters (hash and fd) to default value */
1205         i40e_filter_input_set_init(pf);
1206
1207         /* Initialize the parameters for adminq */
1208         i40e_init_adminq_parameter(hw);
1209         ret = i40e_init_adminq(hw);
1210         if (ret != I40E_SUCCESS) {
1211                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1212                 return -EIO;
1213         }
1214         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1215                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1216                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1217                      ((hw->nvm.version >> 12) & 0xf),
1218                      ((hw->nvm.version >> 4) & 0xff),
1219                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1220
1221         /* initialise the L3_MAP register */
1222         if (!pf->support_multi_driver) {
1223                 ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
1224                                                    0x00000028,  NULL);
1225                 if (ret)
1226                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1227                                      ret);
1228                 PMD_INIT_LOG(DEBUG,
1229                              "Global register 0x%08x is changed to 0x28",
1230                              I40E_GLQF_L3_MAP(40));
1231                 i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER);
1232         }
1233
1234         /* Floating VEB requires a special FW version */
1235         config_floating_veb(dev);
1236         /* Clear PXE mode */
1237         i40e_clear_pxe_mode(hw);
1238         i40e_dev_sync_phy_type(hw);
1239
1240         /*
1241          * On X710, performance numbers fall far short of expectations on
1242          * recent firmware versions, and the fix may not land in the next
1243          * firmware release, so a software workaround is needed: it modifies
1244          * the initial values of three internal-only registers. Note that the
1245          * workaround can be removed once firmware fixes the issue.
1246          */
1248         i40e_configure_registers(hw);
1249
1250         /* Get hw capabilities */
1251         ret = i40e_get_cap(hw);
1252         if (ret != I40E_SUCCESS) {
1253                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1254                 goto err_get_capabilities;
1255         }
1256
1257         /* Initialize parameters for PF */
1258         ret = i40e_pf_parameter_init(dev);
1259         if (ret != 0) {
1260                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1261                 goto err_parameter_init;
1262         }
1263
1264         /* Initialize the queue management */
1265         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1266         if (ret < 0) {
1267                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1268                 goto err_qp_pool_init;
1269         }
1270         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1271                                 hw->func_caps.num_msix_vectors - 1);
1272         if (ret < 0) {
1273                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1274                 goto err_msix_pool_init;
1275         }
1276
1277         /* Initialize lan hmc */
1278         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1279                                 hw->func_caps.num_rx_qp, 0, 0);
1280         if (ret != I40E_SUCCESS) {
1281                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1282                 goto err_init_lan_hmc;
1283         }
1284
1285         /* Configure lan hmc */
1286         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1287         if (ret != I40E_SUCCESS) {
1288                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1289                 goto err_configure_lan_hmc;
1290         }
1291
1292         /* Get and check the mac address */
1293         i40e_get_mac_addr(hw, hw->mac.addr);
1294         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1295                 PMD_INIT_LOG(ERR, "mac address is not valid");
1296                 ret = -EIO;
1297                 goto err_get_mac_addr;
1298         }
1299         /* Copy the permanent MAC address */
1300         ether_addr_copy((struct ether_addr *) hw->mac.addr,
1301                         (struct ether_addr *) hw->mac.perm_addr);
1302
1303         /* Disable flow control */
1304         hw->fc.requested_mode = I40E_FC_NONE;
1305         i40e_set_fc(hw, &aq_fail, TRUE);
1306
1307         /* Set the global registers with default ether type value */
1308         if (!pf->support_multi_driver) {
1309                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1310                                          ETHER_TYPE_VLAN);
1311                 if (ret != I40E_SUCCESS) {
1312                         PMD_INIT_LOG(ERR,
1313                                      "Failed to set the default outer "
1314                                      "VLAN ether type");
1315                         goto err_setup_pf_switch;
1316                 }
1317         }
1318
1319         /* PF setup, which includes VSI setup */
1320         ret = i40e_pf_setup(pf);
1321         if (ret) {
1322                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1323                 goto err_setup_pf_switch;
1324         }
1325
1326         /* reset all stats of the device, including pf and main vsi */
1327         i40e_dev_stats_reset(dev);
1328
1329         vsi = pf->main_vsi;
1330
1331         /* Disable double vlan by default */
1332         i40e_vsi_config_double_vlan(vsi, FALSE);
1333
1334         /* Disable S-TAG identification when floating_veb is disabled */
1335         if (!pf->floating_veb) {
1336                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1337                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1338                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1339                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1340                 }
1341         }
1342
1343         if (!vsi->max_macaddrs)
1344                 len = ETHER_ADDR_LEN;
1345         else
1346                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
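        /*
         * e.g. (hypothetical) with max_macaddrs = 64 and ETHER_ADDR_LEN = 6,
         * len = 6 * 64 = 384 bytes are reserved for the MAC address table.
         */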
1347
1348         /* Should be after VSI initialized */
1349         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1350         if (!dev->data->mac_addrs) {
1351                 PMD_INIT_LOG(ERR,
1352                         "Failed to allocate memory for storing mac address");
1353                 goto err_mac_alloc;
1354         }
1355         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1356                                         &dev->data->mac_addrs[0]);
1357
1358         /* Init dcb to sw mode by default */
1359         ret = i40e_dcb_init_configure(dev, TRUE);
1360         if (ret != I40E_SUCCESS) {
1361                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1362                 pf->flags &= ~I40E_FLAG_DCB;
1363         }
1364         /* Update HW struct after DCB configuration */
1365         i40e_get_cap(hw);
1366
1367         /* initialize pf host driver to set up SR-IOV resources if applicable */
1368         i40e_pf_host_init(dev);
1369
1370         /* register callback func to eal lib */
1371         rte_intr_callback_register(intr_handle,
1372                                    i40e_dev_interrupt_handler, dev);
1373
1374         /* configure and enable device interrupt */
1375         i40e_pf_config_irq0(hw, TRUE);
1376         i40e_pf_enable_irq0(hw);
1377
1378         /* enable uio intr after callback register */
1379         rte_intr_enable(intr_handle);
1380
1381         /* By default disable flexible payload in global configuration */
1382         if (!pf->support_multi_driver)
1383                 i40e_flex_payload_reg_set_default(hw);
1384
1385         /*
1386          * Add an ethertype filter to drop all flow control frames transmitted
1387          * from VSIs. By doing so, we stop VFs from sending out PAUSE or PFC
1388          * frames to the wire.
1389          */
1390         i40e_add_tx_flow_control_drop_filter(pf);
1391
1392         /* Set the max frame size to 0x2600 by default,
1393          * in case other drivers changed the default value.
1394          */
1395         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1396
1397         /* initialize mirror rule list */
1398         TAILQ_INIT(&pf->mirror_list);
1399
1400         /* initialize Traffic Manager configuration */
1401         i40e_tm_conf_init(dev);
1402
1403         /* Initialize customized information */
1404         i40e_init_customized_info(pf);
1405
1406         ret = i40e_init_ethtype_filter_list(dev);
1407         if (ret < 0)
1408                 goto err_init_ethtype_filter_list;
1409         ret = i40e_init_tunnel_filter_list(dev);
1410         if (ret < 0)
1411                 goto err_init_tunnel_filter_list;
1412         ret = i40e_init_fdir_filter_list(dev);
1413         if (ret < 0)
1414                 goto err_init_fdir_filter_list;
1415
1416         /* initialize queue region configuration */
1417         i40e_init_queue_region_conf(dev);
1418
1419         /* initialize rss configuration from rte_flow */
1420         memset(&pf->rss_info, 0,
1421                 sizeof(struct i40e_rte_flow_rss_conf));
1422
1423         return 0;
1424
1425 err_init_fdir_filter_list:
1426         rte_free(pf->tunnel.hash_table);
1427         rte_free(pf->tunnel.hash_map);
1428 err_init_tunnel_filter_list:
1429         rte_free(pf->ethertype.hash_table);
1430         rte_free(pf->ethertype.hash_map);
1431 err_init_ethtype_filter_list:
1432         rte_free(dev->data->mac_addrs);
1433 err_mac_alloc:
1434         i40e_vsi_release(pf->main_vsi);
1435 err_setup_pf_switch:
1436 err_get_mac_addr:
1437 err_configure_lan_hmc:
1438         (void)i40e_shutdown_lan_hmc(hw);
1439 err_init_lan_hmc:
1440         i40e_res_pool_destroy(&pf->msix_pool);
1441 err_msix_pool_init:
1442         i40e_res_pool_destroy(&pf->qp_pool);
1443 err_qp_pool_init:
1444 err_parameter_init:
1445 err_get_capabilities:
1446         (void)i40e_shutdown_adminq(hw);
1447
1448         return ret;
1449 }
1450
1451 static void
1452 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1453 {
1454         struct i40e_ethertype_filter *p_ethertype;
1455         struct i40e_ethertype_rule *ethertype_rule;
1456
1457         ethertype_rule = &pf->ethertype;
1458         /* Remove all ethertype filter rules and hash */
1459         if (ethertype_rule->hash_map)
1460                 rte_free(ethertype_rule->hash_map);
1461         if (ethertype_rule->hash_table)
1462                 rte_hash_free(ethertype_rule->hash_table);
1463
1464         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1465                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1466                              p_ethertype, rules);
1467                 rte_free(p_ethertype);
1468         }
1469 }
1470
1471 static void
1472 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1473 {
1474         struct i40e_tunnel_filter *p_tunnel;
1475         struct i40e_tunnel_rule *tunnel_rule;
1476
1477         tunnel_rule = &pf->tunnel;
1478         /* Remove all tunnel filter rules and hash */
1479         if (tunnel_rule->hash_map)
1480                 rte_free(tunnel_rule->hash_map);
1481         if (tunnel_rule->hash_table)
1482                 rte_hash_free(tunnel_rule->hash_table);
1483
1484         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1485                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1486                 rte_free(p_tunnel);
1487         }
1488 }
1489
1490 static void
1491 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1492 {
1493         struct i40e_fdir_filter *p_fdir;
1494         struct i40e_fdir_info *fdir_info;
1495
1496         fdir_info = &pf->fdir;
1497         /* Remove all flow director rules and hash */
1498         if (fdir_info->hash_map)
1499                 rte_free(fdir_info->hash_map);
1500         if (fdir_info->hash_table)
1501                 rte_hash_free(fdir_info->hash_table);
1502
1503         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1504                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1505                 rte_free(p_fdir);
1506         }
1507 }
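
/*
 * Each of the three teardown helpers above follows the same pattern: free the
 * rte_hash map and table used for lookups, then drain the TAILQ and free each
 * remaining rule node.
 */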
1508
1509 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1510 {
1511         /*
1512          * By default, disable flexible payload extraction
1513          * for the corresponding L2/L3/L4 layers.
1514          */
1515         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1516         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1517         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1518         i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD);
1519 }
1520
1521 static int
1522 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1523 {
1524         struct i40e_pf *pf;
1525         struct rte_pci_device *pci_dev;
1526         struct rte_intr_handle *intr_handle;
1527         struct i40e_hw *hw;
1528         struct i40e_filter_control_settings settings;
1529         struct rte_flow *p_flow;
1530         int ret;
1531         uint8_t aq_fail = 0;
1532
1533         PMD_INIT_FUNC_TRACE();
1534
1535         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1536                 return 0;
1537
1538         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1539         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1540         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1541         intr_handle = &pci_dev->intr_handle;
1542
1543         if (hw->adapter_stopped == 0)
1544                 i40e_dev_close(dev);
1545
1546         dev->dev_ops = NULL;
1547         dev->rx_pkt_burst = NULL;
1548         dev->tx_pkt_burst = NULL;
1549
1550         /* Clear PXE mode */
1551         i40e_clear_pxe_mode(hw);
1552
1553         /* Unconfigure filter control */
1554         memset(&settings, 0, sizeof(settings));
1555         ret = i40e_set_filter_control(hw, &settings);
1556         if (ret)
1557                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1558                                         ret);
1559
1560         /* Disable flow control */
1561         hw->fc.requested_mode = I40E_FC_NONE;
1562         i40e_set_fc(hw, &aq_fail, TRUE);
1563
1564         /* uninitialize pf host driver */
1565         i40e_pf_host_uninit(dev);
1566
1567         rte_free(dev->data->mac_addrs);
1568         dev->data->mac_addrs = NULL;
1569
1570         /* disable uio intr before callback unregister */
1571         rte_intr_disable(intr_handle);
1572
1573         /* unregister callback func from eal lib */
1574         rte_intr_callback_unregister(intr_handle,
1575                                      i40e_dev_interrupt_handler, dev);
1576
1577         i40e_rm_ethtype_filter_list(pf);
1578         i40e_rm_tunnel_filter_list(pf);
1579         i40e_rm_fdir_filter_list(pf);
1580
1581         /* Remove all flows */
1582         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
1583                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
1584                 rte_free(p_flow);
1585         }
1586
1587         /* Remove all Traffic Manager configuration */
1588         i40e_tm_conf_uninit(dev);
1589
1590         return 0;
1591 }
1592
1593 static int
1594 i40e_dev_configure(struct rte_eth_dev *dev)
1595 {
1596         struct i40e_adapter *ad =
1597                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1598         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1599         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1600         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1601         int i, ret;
1602
1603         ret = i40e_dev_sync_phy_type(hw);
1604         if (ret)
1605                 return ret;
1606
1607         /* Initialize to TRUE. If any Rx queue fails to meet the
1608          * bulk allocation or vector Rx preconditions, the flag is reset.
1609          */
1610         ad->rx_bulk_alloc_allowed = true;
1611         ad->rx_vec_allowed = true;
1612         ad->tx_simple_allowed = true;
1613         ad->tx_vec_allowed = true;
1614
1615         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1616                 ret = i40e_fdir_setup(pf);
1617                 if (ret != I40E_SUCCESS) {
1618                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1619                         return -ENOTSUP;
1620                 }
1621                 ret = i40e_fdir_configure(dev);
1622                 if (ret < 0) {
1623                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1624                         goto err;
1625                 }
1626         } else
1627                 i40e_fdir_teardown(pf);
1628
1629         ret = i40e_dev_init_vlan(dev);
1630         if (ret < 0)
1631                 goto err;
1632
1633         /* VMDQ setup.
1634          *  VMDQ setting needs to be moved out of i40e_pf_config_mq_rx(), as
1635          *  VMDQ and RSS settings have different requirements.
1636          *  The general PMD call sequence is NIC init, configure,
1637          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up the
1638          *  VSI a specific queue belongs to when VMDQ is applicable, so VMDQ
1639          *  setting has to be done before rx/tx_queue_setup(); this function
1640          *  is a good place for vmdq_setup.
1641          *  RSS setup needs the actual number of configured RX queues, which
1642          *  is only available after rx_queue_setup(), so dev_start() is a
1643          *  good place for it.
1644          */
1645         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1646                 ret = i40e_vmdq_setup(dev);
1647                 if (ret)
1648                         goto err;
1649         }
1650
1651         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1652                 ret = i40e_dcb_setup(dev);
1653                 if (ret) {
1654                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1655                         goto err_dcb;
1656                 }
1657         }
1658
1659         TAILQ_INIT(&pf->flow_list);
1660
1661         return 0;
1662
1663 err_dcb:
1664         /* need to release vmdq resource if exists */
1665         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1666                 i40e_vsi_release(pf->vmdq[i].vsi);
1667                 pf->vmdq[i].vsi = NULL;
1668         }
1669         rte_free(pf->vmdq);
1670         pf->vmdq = NULL;
1671 err:
1672         /* need to release fdir resource if exists */
1673         i40e_fdir_teardown(pf);
1674         return ret;
1675 }
1676
1677 void
1678 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1679 {
1680         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1681         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1682         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1683         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1684         uint16_t msix_vect = vsi->msix_intr;
1685         uint16_t i;
1686
1687         for (i = 0; i < vsi->nb_qps; i++) {
1688                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1689                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1690                 rte_wmb();
1691         }
1692
1693         if (vsi->type != I40E_VSI_SRIOV) {
1694                 if (!rte_intr_allow_others(intr_handle)) {
1695                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1696                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1697                         I40E_WRITE_REG(hw,
1698                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1699                                        0);
1700                 } else {
1701                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1702                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1703                         I40E_WRITE_REG(hw,
1704                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1705                                                        msix_vect - 1), 0);
1706                 }
1707         } else {
1708                 uint32_t reg;
1709                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1710                         vsi->user_param + (msix_vect - 1);
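                /*
                 * Worked example (hypothetical values): with
                 * num_msix_vectors_vf = 5, VF index (user_param) = 2 and
                 * msix_vect = 3, reg = (5 - 1) * 2 + (3 - 1) = 10, so the
                 * head is written to VPINT_LNKLSTN(10).
                 */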
1711
1712                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1713                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1714         }
1715         I40E_WRITE_FLUSH(hw);
1716 }
1717
1718 static void
1719 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1720                        int base_queue, int nb_queue,
1721                        uint16_t itr_idx)
1722 {
1723         int i;
1724         uint32_t val;
1725         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1726         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1727
1728         /* Bind all RX queues to allocated MSIX interrupt */
1729         for (i = 0; i < nb_queue; i++) {
1730                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1731                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
1732                         ((base_queue + i + 1) <<
1733                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1734                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1735                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1736
1737                 if (i == nb_queue - 1)
1738                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1739                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1740         }
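        /*
         * The QINT_RQCTL writes above chain the queues into a linked list via
         * NEXTQ_INDX: e.g. (hypothetical) base_queue = 8, nb_queue = 3 gives
         * Q8 -> Q9 -> Q10 -> end of list (NEXTQ_INDX_MASK on the last entry).
         */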
1741
1742         /* Write first RX queue to Link list register as the head element */
1743         if (vsi->type != I40E_VSI_SRIOV) {
1744                 uint16_t interval =
1745                         i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1,
1746                                                pf->support_multi_driver);
1747
1748                 if (msix_vect == I40E_MISC_VEC_ID) {
1749                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1750                                        (base_queue <<
1751                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1752                                        (0x0 <<
1753                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1754                         I40E_WRITE_REG(hw,
1755                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1756                                        interval);
1757                 } else {
1758                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1759                                        (base_queue <<
1760                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1761                                        (0x0 <<
1762                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1763                         I40E_WRITE_REG(hw,
1764                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1765                                                        msix_vect - 1),
1766                                        interval);
1767                 }
1768         } else {
1769                 uint32_t reg;
1770
1771                 if (msix_vect == I40E_MISC_VEC_ID) {
1772                         I40E_WRITE_REG(hw,
1773                                        I40E_VPINT_LNKLST0(vsi->user_param),
1774                                        (base_queue <<
1775                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1776                                        (0x0 <<
1777                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1778                 } else {
1779                         /* num_msix_vectors_vf includes IRQ0, so subtract it */
1780                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1781                                 vsi->user_param + (msix_vect - 1);
1782
1783                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1784                                        (base_queue <<
1785                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1786                                        (0x0 <<
1787                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1788                 }
1789         }
1790
1791         I40E_WRITE_FLUSH(hw);
1792 }
1793
1794 void
1795 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
1796 {
1797         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1798         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1799         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1800         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1801         uint16_t msix_vect = vsi->msix_intr;
1802         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1803         uint16_t queue_idx = 0;
1804         int record = 0;
1805         int i;
1806
1807         for (i = 0; i < vsi->nb_qps; i++) {
1808                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1809                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1810         }
1811
1812         /* VF bind interrupt */
1813         if (vsi->type == I40E_VSI_SRIOV) {
1814                 __vsi_queues_bind_intr(vsi, msix_vect,
1815                                        vsi->base_queue, vsi->nb_qps,
1816                                        itr_idx);
1817                 return;
1818         }
1819
1820         /* PF & VMDq bind interrupt */
1821         if (rte_intr_dp_is_en(intr_handle)) {
1822                 if (vsi->type == I40E_VSI_MAIN) {
1823                         queue_idx = 0;
1824                         record = 1;
1825                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1826                         struct i40e_vsi *main_vsi =
1827                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1828                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1829                         record = 1;
1830                 }
1831         }
1832
1833         for (i = 0; i < vsi->nb_used_qps; i++) {
1834                 if (nb_msix <= 1) {
1835                         if (!rte_intr_allow_others(intr_handle))
1836                                 /* allow sharing MISC_VEC_ID */
1837                                 msix_vect = I40E_MISC_VEC_ID;
1838
1839                         /* not enough MSI-X vectors; map all remaining queues to one */
1840                         __vsi_queues_bind_intr(vsi, msix_vect,
1841                                                vsi->base_queue + i,
1842                                                vsi->nb_used_qps - i,
1843                                                itr_idx);
1844                         for (; !!record && i < vsi->nb_used_qps; i++)
1845                                 intr_handle->intr_vec[queue_idx + i] =
1846                                         msix_vect;
1847                         break;
1848                 }
1849                 /* 1:1 queue/msix_vect mapping */
1850                 __vsi_queues_bind_intr(vsi, msix_vect,
1851                                        vsi->base_queue + i, 1,
1852                                        itr_idx);
1853                 if (!!record)
1854                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1855
1856                 msix_vect++;
1857                 nb_msix--;
1858         }
1859 }
1860
1861 static void
1862 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1863 {
1864         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1865         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1866         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1867         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1868         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1869         uint16_t msix_intr, i;
1870
1871         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
1872                 for (i = 0; i < vsi->nb_msix; i++) {
1873                         msix_intr = vsi->msix_intr + i;
1874                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1875                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1876                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1877                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
1878                 }
1879         else
1880                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1881                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1882                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1883                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
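        /*
         * Writing the all-ones ITR_INDX field selects ITR index 3, which the
         * hardware treats as "no ITR update" for this doorbell write
         * (assumption based on the NOITR encoding).
         */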
1884
1885         I40E_WRITE_FLUSH(hw);
1886 }
1887
1888 static void
1889 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1890 {
1891         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1892         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1893         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1894         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1895         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1896         uint16_t msix_intr, i;
1897
1898         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
1899                 for (i = 0; i < vsi->nb_msix; i++) {
1900                         msix_intr = vsi->msix_intr + i;
1901                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1902                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
1903                 }
1904         else
1905                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1906                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
1907
1908         I40E_WRITE_FLUSH(hw);
1909 }
1910
1911 static inline uint8_t
1912 i40e_parse_link_speeds(uint16_t link_speeds)
1913 {
1914         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1915
1916         if (link_speeds & ETH_LINK_SPEED_40G)
1917                 link_speed |= I40E_LINK_SPEED_40GB;
1918         if (link_speeds & ETH_LINK_SPEED_25G)
1919                 link_speed |= I40E_LINK_SPEED_25GB;
1920         if (link_speeds & ETH_LINK_SPEED_20G)
1921                 link_speed |= I40E_LINK_SPEED_20GB;
1922         if (link_speeds & ETH_LINK_SPEED_10G)
1923                 link_speed |= I40E_LINK_SPEED_10GB;
1924         if (link_speeds & ETH_LINK_SPEED_1G)
1925                 link_speed |= I40E_LINK_SPEED_1GB;
1926         if (link_speeds & ETH_LINK_SPEED_100M)
1927                 link_speed |= I40E_LINK_SPEED_100MB;
1928
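        /*
         * e.g. (hypothetical) link_speeds = ETH_LINK_SPEED_10G |
         * ETH_LINK_SPEED_40G yields I40E_LINK_SPEED_10GB |
         * I40E_LINK_SPEED_40GB, the bitmap format the admin queue expects.
         */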
1929         return link_speed;
1930 }
1931
1932 static int
1933 i40e_phy_conf_link(struct i40e_hw *hw,
1934                    uint8_t abilities,
1935                    uint8_t force_speed,
1936                    bool is_up)
1937 {
1938         enum i40e_status_code status;
1939         struct i40e_aq_get_phy_abilities_resp phy_ab;
1940         struct i40e_aq_set_phy_config phy_conf;
1941         enum i40e_aq_phy_type cnt;
1942         uint32_t phy_type_mask = 0;
1943
1944         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
1945                         I40E_AQ_PHY_FLAG_PAUSE_RX |
1947                         I40E_AQ_PHY_FLAG_LOW_POWER;
1948         const uint8_t advt = I40E_LINK_SPEED_40GB |
1949                         I40E_LINK_SPEED_25GB |
1950                         I40E_LINK_SPEED_10GB |
1951                         I40E_LINK_SPEED_1GB |
1952                         I40E_LINK_SPEED_100MB;
1953         int ret = -ENOTSUP;
1954
1956         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
1957                                               NULL);
1958         if (status)
1959                 return ret;
1960
1961         /* If link already up, no need to set up again */
1962         if (is_up && phy_ab.phy_type != 0)
1963                 return I40E_SUCCESS;
1964
1965         memset(&phy_conf, 0, sizeof(phy_conf));
1966
1967         /* bits 0-2 use the values from get_phy_abilities_resp */
1968         abilities &= ~mask;
1969         abilities |= phy_ab.abilities & mask;
1970
1971         /* update abilities and speed */
1972         if (abilities & I40E_AQ_PHY_AN_ENABLED)
1973                 phy_conf.link_speed = advt;
1974         else
1975                 phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
1976
1977         phy_conf.abilities = abilities;
1978
1981         /* To enable link, phy_type mask needs to include each type */
1982         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
1983                 phy_type_mask |= 1 << cnt;
1984
1985         /* use get_phy_abilities_resp value for the rest */
1986         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
1987         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
1988                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
1989                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
1990         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
1991         phy_conf.eee_capability = phy_ab.eee_capability;
1992         phy_conf.eeer = phy_ab.eeer_val;
1993         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
1994
1995         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
1996                     phy_ab.abilities, phy_ab.link_speed);
1997         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
1998                     phy_conf.abilities, phy_conf.link_speed);
1999
2000         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2001         if (status)
2002                 return ret;
2003
2004         return I40E_SUCCESS;
2005 }
2006
2007 static int
2008 i40e_apply_link_speed(struct rte_eth_dev *dev)
2009 {
2010         uint8_t speed;
2011         uint8_t abilities = 0;
2012         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2013         struct rte_eth_conf *conf = &dev->data->dev_conf;
2014
2015         speed = i40e_parse_link_speeds(conf->link_speeds);
2016         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2017         if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
2018                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2019         abilities |= I40E_AQ_PHY_LINK_ENABLED;
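        /*
         * Note: with I40E_AQ_PHY_AN_ENABLED set, i40e_phy_conf_link()
         * advertises its full speed set and ignores the forced speed;
         * otherwise the speed parsed above is forced.
         */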
2020
2021         return i40e_phy_conf_link(hw, abilities, speed, true);
2022 }
2023
2024 static int
2025 i40e_dev_start(struct rte_eth_dev *dev)
2026 {
2027         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2028         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2029         struct i40e_vsi *main_vsi = pf->main_vsi;
2030         int ret, i;
2031         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2032         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2033         uint32_t intr_vector = 0;
2034         struct i40e_vsi *vsi;
2035
2036         hw->adapter_stopped = 0;
2037
2038         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2039                 PMD_INIT_LOG(ERR,
2040                 "Invalid link_speeds for port %u, autonegotiation disabled",
2041                               dev->data->port_id);
2042                 return -EINVAL;
2043         }
2044
2045         rte_intr_disable(intr_handle);
2046
2047         if ((rte_intr_cap_multiple(intr_handle) ||
2048              !RTE_ETH_DEV_SRIOV(dev).active) &&
2049             dev->data->dev_conf.intr_conf.rxq != 0) {
2050                 intr_vector = dev->data->nb_rx_queues;
2051                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2052                 if (ret)
2053                         return ret;
2054         }
2055
2056         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2057                 intr_handle->intr_vec =
2058                         rte_zmalloc("intr_vec",
2059                                     dev->data->nb_rx_queues * sizeof(int),
2060                                     0);
2061                 if (!intr_handle->intr_vec) {
2062                         PMD_INIT_LOG(ERR,
2063                                 "Failed to allocate %d rx_queues intr_vec",
2064                                 dev->data->nb_rx_queues);
2065                         return -ENOMEM;
2066                 }
2067         }
2068
2069         /* Initialize VSI */
2070         ret = i40e_dev_rxtx_init(pf);
2071         if (ret != I40E_SUCCESS) {
2072                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2073                 goto err_up;
2074         }
2075
2076         /* Map queues with MSIX interrupt */
2077         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2078                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
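        /*
         * e.g. (hypothetical) with nb_rx_queues = 16, nb_cfg_vmdq_vsi = 2 and
         * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM = 4, the main VSI keeps
         * 16 - 2 * 4 = 8 queues; the remainder belong to the VMDQ VSIs.
         */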
2079         i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2080         i40e_vsi_enable_queues_intr(main_vsi);
2081
2082         /* Map VMDQ VSI queues with MSIX interrupt */
2083         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2084                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2085                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2086                                           I40E_ITR_INDEX_DEFAULT);
2087                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2088         }
2089
2090         /* enable FDIR MSIX interrupt */
2091         if (pf->fdir.fdir_vsi) {
2092                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
2093                                           I40E_ITR_INDEX_NONE);
2094                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
2095         }
2096
2097         /* Enable all queues which have been configured */
2098         ret = i40e_dev_switch_queues(pf, TRUE);
2099         if (ret != I40E_SUCCESS) {
2100                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
2101                 goto err_up;
2102         }
2103
2104         /* Enable receiving broadcast packets */
2105         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2106         if (ret != I40E_SUCCESS)
2107                 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2108
2109         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2110                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2111                                                 true, NULL);
2112                 if (ret != I40E_SUCCESS)
2113                         PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2114         }
2115
2116         /* Enable the VLAN promiscuous mode. */
2117         if (pf->vfs) {
2118                 for (i = 0; i < pf->vf_num; i++) {
2119                         vsi = pf->vfs[i].vsi;
2120                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2121                                                      true, NULL);
2122                 }
2123         }
2124
2125         /* Enable mac loopback mode */
2126         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2127             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2128                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2129                 if (ret != I40E_SUCCESS) {
2130                         PMD_DRV_LOG(ERR, "fail to set loopback link");
2131                         goto err_up;
2132                 }
2133         }
2134
2135         /* Apply link configure */
2136         if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
2137                                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
2138                                 ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
2139                                 ETH_LINK_SPEED_40G)) {
2140                 PMD_DRV_LOG(ERR, "Invalid link setting");
2141                 goto err_up;
2142         }
2143         ret = i40e_apply_link_speed(dev);
2144         if (I40E_SUCCESS != ret) {
2145                 PMD_DRV_LOG(ERR, "Fail to apply link setting");
2146                 goto err_up;
2147         }
2148
2149         if (!rte_intr_allow_others(intr_handle)) {
2150                 rte_intr_callback_unregister(intr_handle,
2151                                              i40e_dev_interrupt_handler,
2152                                              (void *)dev);
2153                 /* configure and enable device interrupt */
2154                 i40e_pf_config_irq0(hw, FALSE);
2155                 i40e_pf_enable_irq0(hw);
2156
2157                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2158                         PMD_INIT_LOG(INFO,
2159                                 "lsc won't enable because of no intr multiplex");
2160         } else {
2161                 ret = i40e_aq_set_phy_int_mask(hw,
2162                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2163                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2164                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2165                 if (ret != I40E_SUCCESS)
2166                         PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2167
2168                 /* Call get_link_info aq command to enable/disable LSE */
2169                 i40e_dev_link_update(dev, 0);
2170         }
2171
2172         /* enable uio intr after callback register */
2173         rte_intr_enable(intr_handle);
2174
2175         i40e_filter_restore(pf);
2176
2177         if (pf->tm_conf.root && !pf->tm_conf.committed)
2178                 PMD_DRV_LOG(WARNING,
2179                             "please call hierarchy_commit() "
2180                             "before starting the port");
2181
2182         return I40E_SUCCESS;
2183
2184 err_up:
2185         i40e_dev_switch_queues(pf, FALSE);
2186         i40e_dev_clear_queues(dev);
2187
2188         return ret;
2189 }
2190
2191 static void
2192 i40e_dev_stop(struct rte_eth_dev *dev)
2193 {
2194         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2195         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2196         struct i40e_vsi *main_vsi = pf->main_vsi;
2197         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2198         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2199         int i;
2200
2201         if (hw->adapter_stopped == 1)
2202                 return;
2203         /* Disable all queues */
2204         i40e_dev_switch_queues(pf, FALSE);
2205
2206         /* un-map queues with interrupt registers */
2207         i40e_vsi_disable_queues_intr(main_vsi);
2208         i40e_vsi_queues_unbind_intr(main_vsi);
2209
2210         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2211                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2212                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2213         }
2214
2215         if (pf->fdir.fdir_vsi) {
2216                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2217                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2218         }
2219         /* Clear all queues and release memory */
2220         i40e_dev_clear_queues(dev);
2221
2222         /* Set link down */
2223         i40e_dev_set_link_down(dev);
2224
2225         if (!rte_intr_allow_others(intr_handle))
2226                 /* resume to the default handler */
2227                 rte_intr_callback_register(intr_handle,
2228                                            i40e_dev_interrupt_handler,
2229                                            (void *)dev);
2230
2231         /* Clean datapath event and queue/vec mapping */
2232         rte_intr_efd_disable(intr_handle);
2233         if (intr_handle->intr_vec) {
2234                 rte_free(intr_handle->intr_vec);
2235                 intr_handle->intr_vec = NULL;
2236         }
2237
2238         /* reset hierarchy commit */
2239         pf->tm_conf.committed = false;
2240
2241         hw->adapter_stopped = 1;
2242 }
2243
2244 static void
2245 i40e_dev_close(struct rte_eth_dev *dev)
2246 {
2247         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2248         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2249         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2250         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2251         struct i40e_mirror_rule *p_mirror;
2252         uint32_t reg;
2253         int i;
2254         int ret;
2255
2256         PMD_INIT_FUNC_TRACE();
2257
2258         i40e_dev_stop(dev);
2259
2260         /* Remove all mirror rules */
2261         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2262                 ret = i40e_aq_del_mirror_rule(hw,
2263                                               pf->main_vsi->veb->seid,
2264                                               p_mirror->rule_type,
2265                                               p_mirror->entries,
2266                                               p_mirror->num_entries,
2267                                               p_mirror->id);
2268                 if (ret < 0)
2269                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2270                                     "status = %d, aq_err = %d.", ret,
2271                                     hw->aq.asq_last_status);
2272
2273                 /* remove mirror software resource anyway */
2274                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2275                 rte_free(p_mirror);
2276                 pf->nb_mirror_rule--;
2277         }
2278
2279         i40e_dev_free_queues(dev);
2280
2281         /* Disable interrupt */
2282         i40e_pf_disable_irq0(hw);
2283         rte_intr_disable(intr_handle);
2284
2285         /* shutdown and destroy the HMC */
2286         i40e_shutdown_lan_hmc(hw);
2287
2288         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2289                 i40e_vsi_release(pf->vmdq[i].vsi);
2290                 pf->vmdq[i].vsi = NULL;
2291         }
2292         rte_free(pf->vmdq);
2293         pf->vmdq = NULL;
2294
2295         /* release all the existing VSIs and VEBs */
2296         i40e_fdir_teardown(pf);
2297         i40e_vsi_release(pf->main_vsi);
2298
2299         /* shutdown the adminq */
2300         i40e_aq_queue_shutdown(hw, true);
2301         i40e_shutdown_adminq(hw);
2302
2303         i40e_res_pool_destroy(&pf->qp_pool);
2304         i40e_res_pool_destroy(&pf->msix_pool);
2305
2306         /* Disable flexible payload in global configuration */
2307         if (!pf->support_multi_driver)
2308                 i40e_flex_payload_reg_set_default(hw);
2309
2310         /* force a PF reset to clean anything leftover */
2311         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2312         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2313                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2314         I40E_WRITE_FLUSH(hw);
2315 }
2316
2317 /*
2318  * Reset the PF device only, to re-initialize resources in the PMD layer
2319  */
2320 static int
2321 i40e_dev_reset(struct rte_eth_dev *dev)
2322 {
2323         int ret;
2324
2325         /* When the DPDK PF PMD resets the PF port, it should notify all of
2326          * its VFs so they stay aligned with it. The notification mechanism
2327          * is PMD specific and, for the i40e PF, rather complex. To avoid
2328          * unexpected behavior in the VFs, resetting a PF with SR-IOV
2329          * activated is currently not supported. It might be supported later.
2330          */
2331         if (dev->data->sriov.active)
2332                 return -ENOTSUP;
2333
2334         ret = eth_i40e_dev_uninit(dev);
2335         if (ret)
2336                 return ret;
2337
2338         ret = eth_i40e_dev_init(dev);
2339
2340         return ret;
2341 }
2342
2343 static void
2344 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2345 {
2346         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2347         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2348         struct i40e_vsi *vsi = pf->main_vsi;
2349         int status;
2350
2351         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2352                                                      true, NULL, true);
2353         if (status != I40E_SUCCESS)
2354                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2355
2356         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2357                                                         TRUE, NULL);
2358         if (status != I40E_SUCCESS)
2359                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2360
2361 }
2362
2363 static void
2364 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2365 {
2366         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2367         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2368         struct i40e_vsi *vsi = pf->main_vsi;
2369         int status;
2370
2371         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2372                                                      false, NULL, true);
2373         if (status != I40E_SUCCESS)
2374                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2375
2376         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2377                                                         false, NULL);
2378         if (status != I40E_SUCCESS)
2379                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2380 }
2381
2382 static void
2383 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2384 {
2385         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2386         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2387         struct i40e_vsi *vsi = pf->main_vsi;
2388         int ret;
2389
2390         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2391         if (ret != I40E_SUCCESS)
2392                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2393 }
2394
2395 static void
2396 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2397 {
2398         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2399         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2400         struct i40e_vsi *vsi = pf->main_vsi;
2401         int ret;
2402
2403         if (dev->data->promiscuous == 1)
2404                 return; /* must remain in all_multicast mode */
2405
2406         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2407                                 vsi->seid, FALSE, NULL);
2408         if (ret != I40E_SUCCESS)
2409                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2410 }
2411
2412 /*
2413  * Set device link up.
2414  */
2415 static int
2416 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2417 {
2418         /* re-apply link speed setting */
2419         return i40e_apply_link_speed(dev);
2420 }
2421
2422 /*
2423  * Set device link down.
2424  */
2425 static int
2426 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2427 {
2428         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2429         uint8_t abilities = 0;
2430         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2431
2432         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2433         return i40e_phy_conf_link(hw, abilities, speed, false);
2434 }
2435
2436 int
2437 i40e_dev_link_update(struct rte_eth_dev *dev,
2438                      int wait_to_complete)
2439 {
2440 #define CHECK_INTERVAL 100  /* 100ms */
2441 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
2442         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2443         struct i40e_link_status link_status;
2444         struct rte_eth_link link, old;
2445         int status;
2446         unsigned rep_cnt = MAX_REPEAT_TIME;
2447         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2448
2449         memset(&link, 0, sizeof(link));
2450         memset(&old, 0, sizeof(old));
2451         memset(&link_status, 0, sizeof(link_status));
2452         rte_i40e_dev_atomic_read_link_status(dev, &old);
2453
2454         do {
2455                 /* Get link status information from hardware */
2456                 status = i40e_aq_get_link_info(hw, enable_lse,
2457                                                 &link_status, NULL);
2458                 if (status != I40E_SUCCESS) {
2459                         link.link_speed = ETH_SPEED_NUM_100M;
2460                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2461                         PMD_DRV_LOG(ERR, "Failed to get link info");
2462                         goto out;
2463                 }
2464
2465                 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
2466                 if (!wait_to_complete || link.link_status)
2467                         break;
2468
2469                 rte_delay_ms(CHECK_INTERVAL);
2470         } while (--rep_cnt);
2471
2472         if (!link.link_status)
2473                 goto out;
2474
2475         /* i40e uses full duplex only */
2476         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2477
2478         /* Parse the link status */
2479         switch (link_status.link_speed) {
2480         case I40E_LINK_SPEED_100MB:
2481                 link.link_speed = ETH_SPEED_NUM_100M;
2482                 break;
2483         case I40E_LINK_SPEED_1GB:
2484                 link.link_speed = ETH_SPEED_NUM_1G;
2485                 break;
2486         case I40E_LINK_SPEED_10GB:
2487                 link.link_speed = ETH_SPEED_NUM_10G;
2488                 break;
2489         case I40E_LINK_SPEED_20GB:
2490                 link.link_speed = ETH_SPEED_NUM_20G;
2491                 break;
2492         case I40E_LINK_SPEED_25GB:
2493                 link.link_speed = ETH_SPEED_NUM_25G;
2494                 break;
2495         case I40E_LINK_SPEED_40GB:
2496                 link.link_speed = ETH_SPEED_NUM_40G;
2497                 break;
2498         default:
2499                 link.link_speed = ETH_SPEED_NUM_100M;
2500                 break;
2501         }
2502
2503         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2504                         ETH_LINK_SPEED_FIXED);
2505
2506 out:
2507         rte_i40e_dev_atomic_write_link_status(dev, &link);
2508         if (link.link_status == old.link_status)
2509                 return -1;
2510
2511         i40e_notify_all_vfs_link_status(dev);
2512
2513         return 0;
2514 }
2515
2516 /* Get all the statistics of a VSI */
2517 void
2518 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2519 {
2520         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2521         struct i40e_eth_stats *nes = &vsi->eth_stats;
2522         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2523         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2524
2525         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2526                             vsi->offset_loaded, &oes->rx_bytes,
2527                             &nes->rx_bytes);
2528         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2529                             vsi->offset_loaded, &oes->rx_unicast,
2530                             &nes->rx_unicast);
2531         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2532                             vsi->offset_loaded, &oes->rx_multicast,
2533                             &nes->rx_multicast);
2534         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2535                             vsi->offset_loaded, &oes->rx_broadcast,
2536                             &nes->rx_broadcast);
2537         /* exclude CRC bytes */
2538         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2539                 nes->rx_broadcast) * ETHER_CRC_LEN;
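        /*
         * The GLV_GORC counter includes the 4-byte Ethernet CRC of each good
         * packet, so e.g. (hypothetical) 100 received packets mean
         * 100 * ETHER_CRC_LEN = 400 bytes are subtracted here.
         */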
2540
2541         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2542                             &oes->rx_discards, &nes->rx_discards);
2543         /* GLV_REPC not supported */
2544         /* GLV_RMPC not supported */
2545         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2546                             &oes->rx_unknown_protocol,
2547                             &nes->rx_unknown_protocol);
2548         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2549                             vsi->offset_loaded, &oes->tx_bytes,
2550                             &nes->tx_bytes);
2551         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2552                             vsi->offset_loaded, &oes->tx_unicast,
2553                             &nes->tx_unicast);
2554         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2555                             vsi->offset_loaded, &oes->tx_multicast,
2556                             &nes->tx_multicast);
2557         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2558                             vsi->offset_loaded,  &oes->tx_broadcast,
2559                             &nes->tx_broadcast);
2560         /* GLV_TDPC not supported */
2561         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2562                             &oes->tx_errors, &nes->tx_errors);
2563         vsi->offset_loaded = true;
2564
2565         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2566                     vsi->vsi_id);
2567         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2568         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2569         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2570         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2571         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2572         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2573                     nes->rx_unknown_protocol);
2574         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2575         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2576         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2577         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2578         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2579         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2580         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2581                     vsi->vsi_id);
2582 }
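
/*
 * Illustrative sketch, not driver code: the i40e_stat_update_48() calls
 * above read a 48-bit counter that is split across a high/low register
 * pair and accumulate the delta against a snapshot taken at offset-load
 * time. A minimal rollover-aware version of that update, assuming the
 * raw register values have already been read, could look like this.
 */
static __rte_unused void
i40e_stat_update_48_sketch(uint32_t hi, uint32_t lo, bool offset_loaded,
			   uint64_t *offset, uint64_t *stat)
{
	const uint64_t mask_48 = (1ULL << 48) - 1;
	uint64_t new_data;

	/* Low 32 bits plus the low 16 bits of the high register */
	new_data = (uint64_t)lo | (((uint64_t)hi & 0xFFFF) << 32);

	/* The first read after a reset becomes the new offset */
	if (!offset_loaded)
		*offset = new_data;

	/* Tolerate one 48-bit wrap between reads */
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (new_data + (1ULL << 48)) - *offset;

	*stat &= mask_48;
}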
2583
2584 static void
2585 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2586 {
2587         unsigned int i;
2588         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2589         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2590
2591         /* Get rx/tx bytes of internal transfer packets */
2592         i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2593                         I40E_GLV_GORCL(hw->port),
2594                         pf->offset_loaded,
2595                         &pf->internal_stats_offset.rx_bytes,
2596                         &pf->internal_stats.rx_bytes);
2597
2598         i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2599                         I40E_GLV_GOTCL(hw->port),
2600                         pf->offset_loaded,
2601                         &pf->internal_stats_offset.tx_bytes,
2602                         &pf->internal_stats.tx_bytes);
2603         /* Get total internal rx packet count */
2604         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
2605                             I40E_GLV_UPRCL(hw->port),
2606                             pf->offset_loaded,
2607                             &pf->internal_stats_offset.rx_unicast,
2608                             &pf->internal_stats.rx_unicast);
2609         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
2610                             I40E_GLV_MPRCL(hw->port),
2611                             pf->offset_loaded,
2612                             &pf->internal_stats_offset.rx_multicast,
2613                             &pf->internal_stats.rx_multicast);
2614         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
2615                             I40E_GLV_BPRCL(hw->port),
2616                             pf->offset_loaded,
2617                             &pf->internal_stats_offset.rx_broadcast,
2618                             &pf->internal_stats.rx_broadcast);
2619         /* Get total internal tx packet count */
2620         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
2621                             I40E_GLV_UPTCL(hw->port),
2622                             pf->offset_loaded,
2623                             &pf->internal_stats_offset.tx_unicast,
2624                             &pf->internal_stats.tx_unicast);
2625         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
2626                             I40E_GLV_MPTCL(hw->port),
2627                             pf->offset_loaded,
2628                             &pf->internal_stats_offset.tx_multicast,
2629                             &pf->internal_stats.tx_multicast);
2630         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
2631                             I40E_GLV_BPTCL(hw->port),
2632                             pf->offset_loaded,
2633                             &pf->internal_stats_offset.tx_broadcast,
2634                             &pf->internal_stats.tx_broadcast);
2635
2636         /* exclude CRC size */
2637         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
2638                 pf->internal_stats.rx_multicast +
2639                 pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
2640
2641         /* Get statistics of struct i40e_eth_stats */
2642         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2643                             I40E_GLPRT_GORCL(hw->port),
2644                             pf->offset_loaded, &os->eth.rx_bytes,
2645                             &ns->eth.rx_bytes);
2646         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2647                             I40E_GLPRT_UPRCL(hw->port),
2648                             pf->offset_loaded, &os->eth.rx_unicast,
2649                             &ns->eth.rx_unicast);
2650         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2651                             I40E_GLPRT_MPRCL(hw->port),
2652                             pf->offset_loaded, &os->eth.rx_multicast,
2653                             &ns->eth.rx_multicast);
2654         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2655                             I40E_GLPRT_BPRCL(hw->port),
2656                             pf->offset_loaded, &os->eth.rx_broadcast,
2657                             &ns->eth.rx_broadcast);
2658         /* Workaround: CRC size should not be included in byte statistics,
2659          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2660          */
2661         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2662                 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2663
2664         /* Exclude internal rx bytes.
2665          * Workaround: it is possible that I40E_GLV_GORC[H/L] is updated
2666          * before I40E_GLPRT_GORC[H/L], so there is a small window that can
2667          * produce a negative value.
2668          * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
2669          */
2670         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
2671                 ns->eth.rx_bytes = 0;
2672         else
2673                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
2674
2675         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
2676                 ns->eth.rx_unicast = 0;
2677         else
2678                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
2679
2680         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
2681                 ns->eth.rx_multicast = 0;
2682         else
2683                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
2684
2685         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
2686                 ns->eth.rx_broadcast = 0;
2687         else
2688                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
2689
2690         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2691                             pf->offset_loaded, &os->eth.rx_discards,
2692                             &ns->eth.rx_discards);
2693         /* GLPRT_REPC not supported */
2694         /* GLPRT_RMPC not supported */
2695         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2696                             pf->offset_loaded,
2697                             &os->eth.rx_unknown_protocol,
2698                             &ns->eth.rx_unknown_protocol);
2699         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2700                             I40E_GLPRT_GOTCL(hw->port),
2701                             pf->offset_loaded, &os->eth.tx_bytes,
2702                             &ns->eth.tx_bytes);
2703         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2704                             I40E_GLPRT_UPTCL(hw->port),
2705                             pf->offset_loaded, &os->eth.tx_unicast,
2706                             &ns->eth.tx_unicast);
2707         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2708                             I40E_GLPRT_MPTCL(hw->port),
2709                             pf->offset_loaded, &os->eth.tx_multicast,
2710                             &ns->eth.tx_multicast);
2711         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2712                             I40E_GLPRT_BPTCL(hw->port),
2713                             pf->offset_loaded, &os->eth.tx_broadcast,
2714                             &ns->eth.tx_broadcast);
2715         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2716                 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2717
2718         /* Exclude internal tx bytes.
2719          * Workaround: it is possible that I40E_GLV_GOTC[H/L] is updated
2720          * before I40E_GLPRT_GOTC[H/L], so there is a small window that can
2721          * produce a negative value.
2722          * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
2723          */
2724         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
2725                 ns->eth.tx_bytes = 0;
2726         else
2727                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
2728
2729         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
2730                 ns->eth.tx_unicast = 0;
2731         else
2732                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
2733
2734         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
2735                 ns->eth.tx_multicast = 0;
2736         else
2737                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
2738
2739         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
2740                 ns->eth.tx_broadcast = 0;
2741         else
2742                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
2743
2744         /* GLPRT_TEPC not supported */
2745
2746         /* Additional port-specific stats */
2747         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2748                             pf->offset_loaded, &os->tx_dropped_link_down,
2749                             &ns->tx_dropped_link_down);
2750         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2751                             pf->offset_loaded, &os->crc_errors,
2752                             &ns->crc_errors);
2753         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2754                             pf->offset_loaded, &os->illegal_bytes,
2755                             &ns->illegal_bytes);
2756         /* GLPRT_ERRBC not supported */
2757         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2758                             pf->offset_loaded, &os->mac_local_faults,
2759                             &ns->mac_local_faults);
2760         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2761                             pf->offset_loaded, &os->mac_remote_faults,
2762                             &ns->mac_remote_faults);
2763         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2764                             pf->offset_loaded, &os->rx_length_errors,
2765                             &ns->rx_length_errors);
2766         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2767                             pf->offset_loaded, &os->link_xon_rx,
2768                             &ns->link_xon_rx);
2769         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2770                             pf->offset_loaded, &os->link_xoff_rx,
2771                             &ns->link_xoff_rx);
2772         for (i = 0; i < 8; i++) {
2773                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2774                                     pf->offset_loaded,
2775                                     &os->priority_xon_rx[i],
2776                                     &ns->priority_xon_rx[i]);
2777                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2778                                     pf->offset_loaded,
2779                                     &os->priority_xoff_rx[i],
2780                                     &ns->priority_xoff_rx[i]);
2781         }
2782         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2783                             pf->offset_loaded, &os->link_xon_tx,
2784                             &ns->link_xon_tx);
2785         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2786                             pf->offset_loaded, &os->link_xoff_tx,
2787                             &ns->link_xoff_tx);
2788         for (i = 0; i < 8; i++) {
2789                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2790                                     pf->offset_loaded,
2791                                     &os->priority_xon_tx[i],
2792                                     &ns->priority_xon_tx[i]);
2793                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2794                                     pf->offset_loaded,
2795                                     &os->priority_xoff_tx[i],
2796                                     &ns->priority_xoff_tx[i]);
2797                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2798                                     pf->offset_loaded,
2799                                     &os->priority_xon_2_xoff[i],
2800                                     &ns->priority_xon_2_xoff[i]);
2801         }
2802         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2803                             I40E_GLPRT_PRC64L(hw->port),
2804                             pf->offset_loaded, &os->rx_size_64,
2805                             &ns->rx_size_64);
2806         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2807                             I40E_GLPRT_PRC127L(hw->port),
2808                             pf->offset_loaded, &os->rx_size_127,
2809                             &ns->rx_size_127);
2810         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2811                             I40E_GLPRT_PRC255L(hw->port),
2812                             pf->offset_loaded, &os->rx_size_255,
2813                             &ns->rx_size_255);
2814         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2815                             I40E_GLPRT_PRC511L(hw->port),
2816                             pf->offset_loaded, &os->rx_size_511,
2817                             &ns->rx_size_511);
2818         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2819                             I40E_GLPRT_PRC1023L(hw->port),
2820                             pf->offset_loaded, &os->rx_size_1023,
2821                             &ns->rx_size_1023);
2822         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2823                             I40E_GLPRT_PRC1522L(hw->port),
2824                             pf->offset_loaded, &os->rx_size_1522,
2825                             &ns->rx_size_1522);
2826         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2827                             I40E_GLPRT_PRC9522L(hw->port),
2828                             pf->offset_loaded, &os->rx_size_big,
2829                             &ns->rx_size_big);
2830         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2831                             pf->offset_loaded, &os->rx_undersize,
2832                             &ns->rx_undersize);
2833         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2834                             pf->offset_loaded, &os->rx_fragments,
2835                             &ns->rx_fragments);
2836         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2837                             pf->offset_loaded, &os->rx_oversize,
2838                             &ns->rx_oversize);
2839         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2840                             pf->offset_loaded, &os->rx_jabber,
2841                             &ns->rx_jabber);
2842         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2843                             I40E_GLPRT_PTC64L(hw->port),
2844                             pf->offset_loaded, &os->tx_size_64,
2845                             &ns->tx_size_64);
2846         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2847                             I40E_GLPRT_PTC127L(hw->port),
2848                             pf->offset_loaded, &os->tx_size_127,
2849                             &ns->tx_size_127);
2850         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2851                             I40E_GLPRT_PTC255L(hw->port),
2852                             pf->offset_loaded, &os->tx_size_255,
2853                             &ns->tx_size_255);
2854         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2855                             I40E_GLPRT_PTC511L(hw->port),
2856                             pf->offset_loaded, &os->tx_size_511,
2857                             &ns->tx_size_511);
2858         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2859                             I40E_GLPRT_PTC1023L(hw->port),
2860                             pf->offset_loaded, &os->tx_size_1023,
2861                             &ns->tx_size_1023);
2862         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2863                             I40E_GLPRT_PTC1522L(hw->port),
2864                             pf->offset_loaded, &os->tx_size_1522,
2865                             &ns->tx_size_1522);
2866         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2867                             I40E_GLPRT_PTC9522L(hw->port),
2868                             pf->offset_loaded, &os->tx_size_big,
2869                             &ns->tx_size_big);
2870         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2871                            pf->offset_loaded,
2872                            &os->fd_sb_match, &ns->fd_sb_match);
2873         /* GLPRT_MSPDC not supported */
2874         /* GLPRT_XEC not supported */
2875
2876         pf->offset_loaded = true;
2877
2878         if (pf->main_vsi)
2879                 i40e_update_vsi_stats(pf->main_vsi);
2880 }
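
/*
 * Illustrative sketch, not driver code: the repeated underflow guards in
 * i40e_read_stats_registers() above all implement the same saturating
 * subtraction (clamp to zero instead of wrapping when the internal VSI
 * counter races ahead of the port counter). The pattern could be
 * factored as:
 */
static __rte_unused uint64_t
i40e_stat_sub_sat_sketch(uint64_t port_cnt, uint64_t internal_cnt)
{
	/* Saturate at zero rather than wrapping around */
	return port_cnt < internal_cnt ? 0 : port_cnt - internal_cnt;
}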
2881
2882 /* Get all statistics of a port */
2883 static int
2884 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2885 {
2886         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2887         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2888         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2889         unsigned i;
2890
2891         /* Read the hardware registers to refresh the counters, then fill the stats struct */
2892         i40e_read_stats_registers(pf, hw);
2893
2894         stats->ipackets = ns->eth.rx_unicast +
2895                         ns->eth.rx_multicast +
2896                         ns->eth.rx_broadcast -
2897                         ns->eth.rx_discards -
2898                         pf->main_vsi->eth_stats.rx_discards;
2899         stats->opackets = ns->eth.tx_unicast +
2900                         ns->eth.tx_multicast +
2901                         ns->eth.tx_broadcast;
2902         stats->ibytes   = ns->eth.rx_bytes;
2903         stats->obytes   = ns->eth.tx_bytes;
2904         stats->oerrors  = ns->eth.tx_errors +
2905                         pf->main_vsi->eth_stats.tx_errors;
2906
2907         /* Rx Errors */
2908         stats->imissed  = ns->eth.rx_discards +
2909                         pf->main_vsi->eth_stats.rx_discards;
2910         stats->ierrors  = ns->crc_errors +
2911                         ns->rx_length_errors + ns->rx_undersize +
2912                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
2913
2914         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2915         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
2916         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
2917         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
2918         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
2919         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
2920         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2921                     ns->eth.rx_unknown_protocol);
2922         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
2923         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
2924         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
2925         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
2926         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
2927         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
2928
2929         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
2930                     ns->tx_dropped_link_down);
2931         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
2932         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
2933                     ns->illegal_bytes);
2934         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
2935         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
2936                     ns->mac_local_faults);
2937         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
2938                     ns->mac_remote_faults);
2939         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
2940                     ns->rx_length_errors);
2941         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
2942         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
2943         for (i = 0; i < 8; i++) {
2944                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
2945                                 i, ns->priority_xon_rx[i]);
2946                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
2947                                 i, ns->priority_xoff_rx[i]);
2948         }
2949         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
2950         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
2951         for (i = 0; i < 8; i++) {
2952                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
2953                                 i, ns->priority_xon_tx[i]);
2954                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
2955                                 i, ns->priority_xoff_tx[i]);
2956                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
2957                                 i, ns->priority_xon_2_xoff[i]);
2958         }
2959         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
2960         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
2961         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
2962         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
2963         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
2964         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
2965         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
2966         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
2967         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
2968         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
2969         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
2970         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
2971         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
2972         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
2973         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
2974         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
2975         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
2976         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
2977         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
2978                         ns->mac_short_packet_dropped);
2979         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
2980                     ns->checksum_error);
2981         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
2982         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
2983         return 0;
2984 }
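
/*
 * Illustrative sketch, not driver code: an application reaches the
 * callback above through the generic ethdev API. The port_id value and
 * the error handling below are assumptions for the example.
 */
static __rte_unused void
i40e_example_print_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;

	/* ipackets/opackets are derived from the unicast, multicast and
	 * broadcast counters read in i40e_read_stats_registers().
	 */
	printf("port %u: rx %"PRIu64" pkts (%"PRIu64" bytes), "
	       "tx %"PRIu64" pkts (%"PRIu64" bytes)\n",
	       port_id, stats.ipackets, stats.ibytes,
	       stats.opackets, stats.obytes);
}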
2985
2986 /* Reset the statistics */
2987 static void
2988 i40e_dev_stats_reset(struct rte_eth_dev *dev)
2989 {
2990         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2991         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2992
2993         /* Mark PF and VSI stats to update the offset, aka "reset" */
2994         pf->offset_loaded = false;
2995         if (pf->main_vsi)
2996                 pf->main_vsi->offset_loaded = false;
2997
2998         /* read the stats, reading current register values into offset */
2999         i40e_read_stats_registers(pf, hw);
3000 }
3001
3002 static uint32_t
3003 i40e_xstats_calc_num(void)
3004 {
3005         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3006                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3007                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3008 }
3009
3010 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3011                                      struct rte_eth_xstat_name *xstats_names,
3012                                      __rte_unused unsigned limit)
3013 {
3014         unsigned count = 0;
3015         unsigned i, prio;
3016
3017         if (xstats_names == NULL)
3018                 return i40e_xstats_calc_num();
3019
3020         /* Note: limit is checked in rte_eth_xstats_get_names() */
3021
3022         /* Get stats from i40e_eth_stats struct */
3023         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3024                 snprintf(xstats_names[count].name,
3025                          sizeof(xstats_names[count].name),
3026                          "%s", rte_i40e_stats_strings[i].name);
3027                 count++;
3028         }
3029
3030         /* Get individual stats from i40e_hw_port struct */
3031         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3032                 snprintf(xstats_names[count].name,
3033                         sizeof(xstats_names[count].name),
3034                          "%s", rte_i40e_hw_port_strings[i].name);
3035                 count++;
3036         }
3037
3038         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3039                 for (prio = 0; prio < 8; prio++) {
3040                         snprintf(xstats_names[count].name,
3041                                  sizeof(xstats_names[count].name),
3042                                  "rx_priority%u_%s", prio,
3043                                  rte_i40e_rxq_prio_strings[i].name);
3044                         count++;
3045                 }
3046         }
3047
3048         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3049                 for (prio = 0; prio < 8; prio++) {
3050                         snprintf(xstats_names[count].name,
3051                                  sizeof(xstats_names[count].name),
3052                                  "tx_priority%u_%s", prio,
3053                                  rte_i40e_txq_prio_strings[i].name);
3054                         count++;
3055                 }
3056         }
3057         return count;
3058 }
3059
3060 static int
3061 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3062                     unsigned n)
3063 {
3064         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3065         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3066         unsigned i, count, prio;
3067         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3068
3069         count = i40e_xstats_calc_num();
3070         if (n < count)
3071                 return count;
3072
3073         i40e_read_stats_registers(pf, hw);
3074
3075         if (xstats == NULL)
3076                 return 0;
3077
3078         count = 0;
3079
3080         /* Get stats from i40e_eth_stats struct */
3081         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3082                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3083                         rte_i40e_stats_strings[i].offset);
3084                 xstats[count].id = count;
3085                 count++;
3086         }
3087
3088         /* Get individual stats from i40e_hw_port struct */
3089         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3090                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3091                         rte_i40e_hw_port_strings[i].offset);
3092                 xstats[count].id = count;
3093                 count++;
3094         }
3095
3096         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3097                 for (prio = 0; prio < 8; prio++) {
3098                         xstats[count].value =
3099                                 *(uint64_t *)(((char *)hw_stats) +
3100                                 rte_i40e_rxq_prio_strings[i].offset +
3101                                 (sizeof(uint64_t) * prio));
3102                         xstats[count].id = count;
3103                         count++;
3104                 }
3105         }
3106
3107         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3108                 for (prio = 0; prio < 8; prio++) {
3109                         xstats[count].value =
3110                                 *(uint64_t *)(((char *)hw_stats) +
3111                                 rte_i40e_txq_prio_strings[i].offset +
3112                                 (sizeof(uint64_t) * prio));
3113                         xstats[count].id = count;
3114                         count++;
3115                 }
3116         }
3117
3118         return count;
3119 }
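
/*
 * Illustrative sketch, not driver code: callers are expected to size the
 * xstats array with a first call before fetching, matching the
 * "n < count returns count" contract implemented above. The allocation
 * strategy here is an assumption for the example.
 */
static __rte_unused void
i40e_example_dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat *xstats = NULL;
	struct rte_eth_xstat_name *names = NULL;
	int n, i;

	/* First pass: query the number of extended statistics */
	n = rte_eth_xstats_get(port_id, NULL, 0);
	if (n <= 0)
		return;

	xstats = rte_malloc(NULL, sizeof(*xstats) * n, 0);
	names = rte_malloc(NULL, sizeof(*names) * n, 0);
	if (xstats == NULL || names == NULL)
		goto out;

	/* Second pass: fetch the values and the matching name table */
	if (rte_eth_xstats_get(port_id, xstats, n) != n ||
	    rte_eth_xstats_get_names(port_id, names, n) != n)
		goto out;

	for (i = 0; i < n; i++)
		printf("%s: %"PRIu64"\n", names[i].name, xstats[i].value);
out:
	rte_free(xstats);
	rte_free(names);
}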
3120
3121 static int
3122 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
3123                                  __rte_unused uint16_t queue_id,
3124                                  __rte_unused uint8_t stat_idx,
3125                                  __rte_unused uint8_t is_rx)
3126 {
3127         PMD_INIT_FUNC_TRACE();
3128
3129         return -ENOSYS;
3130 }
3131
3132 static int
3133 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3134 {
3135         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3136         u32 full_ver;
3137         u8 ver, patch;
3138         u16 build;
3139         int ret;
3140
3141         full_ver = hw->nvm.oem_ver;
3142         ver = (u8)(full_ver >> 24);
3143         build = (u16)((full_ver >> 8) & 0xffff);
3144         patch = (u8)(full_ver & 0xff);
3145
3146         ret = snprintf(fw_version, fw_size,
3147                  "%d.%d%d 0x%08x %d.%d.%d",
3148                  ((hw->nvm.version >> 12) & 0xf),
3149                  ((hw->nvm.version >> 4) & 0xff),
3150                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3151                  ver, build, patch);
3152
3153         ret += 1; /* add the size of '\0' */
3154         if (fw_size < (u32)ret)
3155                 return ret;
3156         else
3157                 return 0;
3158 }
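
/*
 * Illustrative sketch, not driver code: per the contract above, a return
 * value greater than zero is the buffer size the caller should retry
 * with. The buffer size of 64 is an assumption for the example.
 */
static __rte_unused void
i40e_example_print_fw_version(uint16_t port_id)
{
	char fw_version[64];
	int ret;

	ret = rte_eth_dev_fw_version_get(port_id, fw_version,
					 sizeof(fw_version));
	if (ret == 0)
		printf("port %u firmware: %s\n", port_id, fw_version);
	else if (ret > 0)
		printf("need a %d-byte buffer for the version string\n", ret);
}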
3159
3160 static void
3161 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3162 {
3163         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3164         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3165         struct i40e_vsi *vsi = pf->main_vsi;
3166         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3167
3168         dev_info->pci_dev = pci_dev;
3169         dev_info->max_rx_queues = vsi->nb_qps;
3170         dev_info->max_tx_queues = vsi->nb_qps;
3171         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3172         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3173         dev_info->max_mac_addrs = vsi->max_macaddrs;
3174         dev_info->max_vfs = pci_dev->max_vfs;
3175         dev_info->rx_offload_capa =
3176                 DEV_RX_OFFLOAD_VLAN_STRIP |
3177                 DEV_RX_OFFLOAD_QINQ_STRIP |
3178                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3179                 DEV_RX_OFFLOAD_UDP_CKSUM |
3180                 DEV_RX_OFFLOAD_TCP_CKSUM |
3181                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3182                 DEV_RX_OFFLOAD_CRC_STRIP;
3183         dev_info->tx_offload_capa =
3184                 DEV_TX_OFFLOAD_VLAN_INSERT |
3185                 DEV_TX_OFFLOAD_QINQ_INSERT |
3186                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3187                 DEV_TX_OFFLOAD_UDP_CKSUM |
3188                 DEV_TX_OFFLOAD_TCP_CKSUM |
3189                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3190                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3191                 DEV_TX_OFFLOAD_TCP_TSO |
3192                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3193                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3194                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3195                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
3196         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3197                                                 sizeof(uint32_t);
3198         dev_info->reta_size = pf->hash_lut_size;
3199         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3200
3201         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3202                 .rx_thresh = {
3203                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3204                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3205                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3206                 },
3207                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3208                 .rx_drop_en = 0,
3209         };
3210
3211         dev_info->default_txconf = (struct rte_eth_txconf) {
3212                 .tx_thresh = {
3213                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3214                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3215                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3216                 },
3217                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3218                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3219                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3220                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3221         };
3222
3223         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3224                 .nb_max = I40E_MAX_RING_DESC,
3225                 .nb_min = I40E_MIN_RING_DESC,
3226                 .nb_align = I40E_ALIGN_RING_DESC,
3227         };
3228
3229         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3230                 .nb_max = I40E_MAX_RING_DESC,
3231                 .nb_min = I40E_MIN_RING_DESC,
3232                 .nb_align = I40E_ALIGN_RING_DESC,
3233                 .nb_seg_max = I40E_TX_MAX_SEG,
3234                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3235         };
3236
3237         if (pf->flags & I40E_FLAG_VMDQ) {
3238                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3239                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3240                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3241                                                 pf->max_nb_vmdq_vsi;
3242                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3243                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3244                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3245         }
3246
3247         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
3248                 /* For XL710 */
3249                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3250         else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
3251                 /* For XXV710 */
3252                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3253         else
3254                 /* For X710 */
3255                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3256 }
3257
3258 static int
3259 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3260 {
3261         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3262         struct i40e_vsi *vsi = pf->main_vsi;
3263         PMD_INIT_FUNC_TRACE();
3264
3265         if (on)
3266                 return i40e_vsi_add_vlan(vsi, vlan_id);
3267         else
3268                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3269 }
3270
3271 static int
3272 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3273                                 enum rte_vlan_type vlan_type,
3274                                 uint16_t tpid, int qinq)
3275 {
3276         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3277         uint64_t reg_r = 0;
3278         uint64_t reg_w = 0;
3279         uint16_t reg_id = 3;
3280         int ret;
3281
3282         if (qinq) {
3283                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3284                         reg_id = 2;
3285         }
3286
3287         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3288                                           &reg_r, NULL);
3289         if (ret != I40E_SUCCESS) {
3290                 PMD_DRV_LOG(ERR,
3291                            "Failed to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3292                            reg_id);
3293                 return -EIO;
3294         }
3295         PMD_DRV_LOG(DEBUG,
3296                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3297                     reg_id, reg_r);
3298
3299         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3300         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3301         if (reg_r == reg_w) {
3302                 PMD_DRV_LOG(DEBUG, "No need to write");
3303                 return 0;
3304         }
3305
3306         ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3307                                            reg_w, NULL);
3308         if (ret != I40E_SUCCESS) {
3309                 PMD_DRV_LOG(ERR,
3310                             "Failed to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3311                             reg_id);
3312                 return -EIO;
3313         }
3314         PMD_DRV_LOG(DEBUG,
3315                     "Global register 0x%08x is changed with value 0x%08x",
3316                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3317
3318         return 0;
3319 }
3320
3321 static int
3322 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3323                    enum rte_vlan_type vlan_type,
3324                    uint16_t tpid)
3325 {
3326         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3327         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3328         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
3329         int ret = 0;
3330
3331         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3332              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3333             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3334                 PMD_DRV_LOG(ERR,
3335                             "Unsupported vlan type.");
3336                 return -EINVAL;
3337         }
3338
3339         if (pf->support_multi_driver) {
3340                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3341                 return -ENOTSUP;
3342         }
3343
3344         /* Support for 802.1ad frames was added in NVM API 1.7 */
3345         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3346                 if (qinq) {
3347                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3348                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3349                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3350                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3351                 } else {
3352                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3353                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3354                 }
3355                 ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
3356                 if (ret != I40E_SUCCESS) {
3357                         PMD_DRV_LOG(ERR,
3358                                     "Set switch config failed aq_err: %d",
3359                                     hw->aq.asq_last_status);
3360                         ret = -EIO;
3361                 }
3362         } else
3363                 /* If NVM API < 1.7, keep the register setting */
3364                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3365                                                       tpid, qinq);
3366         i40e_global_cfg_warning(I40E_WARNING_TPID);
3367
3368         return ret;
3369 }
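
/*
 * Illustrative sketch, not driver code: the callback above is reached
 * via rte_eth_dev_set_vlan_ether_type(). Setting the outer TPID to the
 * 802.1ad value (0x88A8) for a QinQ-enabled port might look like this;
 * the port_id is an assumption for the example.
 */
static __rte_unused int
i40e_example_set_qinq_tpid(uint16_t port_id)
{
	/* Requires rxmode.hw_vlan_extend (QinQ) to be enabled first */
	return rte_eth_dev_set_vlan_ether_type(port_id,
					       ETH_VLAN_TYPE_OUTER, 0x88A8);
}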
3370
3371 static int
3372 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3373 {
3374         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3375         struct i40e_vsi *vsi = pf->main_vsi;
3376
3377         if (mask & ETH_VLAN_FILTER_MASK) {
3378                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3379                         i40e_vsi_config_vlan_filter(vsi, TRUE);
3380                 else
3381                         i40e_vsi_config_vlan_filter(vsi, FALSE);
3382         }
3383
3384         if (mask & ETH_VLAN_STRIP_MASK) {
3385                 /* Enable or disable VLAN stripping */
3386                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
3387                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
3388                 else
3389                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
3390         }
3391
3392         if (mask & ETH_VLAN_EXTEND_MASK) {
3393                 if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
3394                         i40e_vsi_config_double_vlan(vsi, TRUE);
3395                         /* Set global registers with default ethertype. */
3396                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3397                                            ETHER_TYPE_VLAN);
3398                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3399                                            ETHER_TYPE_VLAN);
3400                 } else {
3401                         i40e_vsi_config_double_vlan(vsi, FALSE);
3402                 }
3403         }
3404
3405         return 0;
3406 }
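
/*
 * Illustrative sketch, not driver code: the mask-based callback above is
 * driven by rte_eth_dev_set_vlan_offload(). Enabling stripping and
 * filtering together could look like this; the port_id is an assumption
 * for the example.
 */
static __rte_unused int
i40e_example_enable_vlan_offloads(uint16_t port_id)
{
	/* Set bits enable the offload, cleared bits disable it */
	return rte_eth_dev_set_vlan_offload(port_id,
			ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
}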
3407
3408 static void
3409 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3410                           __rte_unused uint16_t queue,
3411                           __rte_unused int on)
3412 {
3413         PMD_INIT_FUNC_TRACE();
3414 }
3415
3416 static int
3417 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3418 {
3419         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3420         struct i40e_vsi *vsi = pf->main_vsi;
3421         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3422         struct i40e_vsi_vlan_pvid_info info;
3423
3424         memset(&info, 0, sizeof(info));
3425         info.on = on;
3426         if (info.on)
3427                 info.config.pvid = pvid;
3428         else {
3429                 info.config.reject.tagged =
3430                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
3431                 info.config.reject.untagged =
3432                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
3433         }
3434
3435         return i40e_vsi_vlan_pvid_set(vsi, &info);
3436 }
3437
3438 static int
3439 i40e_dev_led_on(struct rte_eth_dev *dev)
3440 {
3441         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3442         uint32_t mode = i40e_led_get(hw);
3443
3444         if (mode == 0)
3445                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
3446
3447         return 0;
3448 }
3449
3450 static int
3451 i40e_dev_led_off(struct rte_eth_dev *dev)
3452 {
3453         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3454         uint32_t mode = i40e_led_get(hw);
3455
3456         if (mode != 0)
3457                 i40e_led_set(hw, 0, false);
3458
3459         return 0;
3460 }
3461
3462 static int
3463 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3464 {
3465         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3466         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3467
3468         fc_conf->pause_time = pf->fc_conf.pause_time;
3469
3470         /* Read out from the registers, in case they were modified by another port */
3471         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
3472                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
3473         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
3474                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
3475
3476         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3477         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3478
3479         /* Return the current mode according to the actual setting */
3480         switch (hw->fc.current_mode) {
3481         case I40E_FC_FULL:
3482                 fc_conf->mode = RTE_FC_FULL;
3483                 break;
3484         case I40E_FC_TX_PAUSE:
3485                 fc_conf->mode = RTE_FC_TX_PAUSE;
3486                 break;
3487         case I40E_FC_RX_PAUSE:
3488                 fc_conf->mode = RTE_FC_RX_PAUSE;
3489                 break;
3490         case I40E_FC_NONE:
3491         default:
3492                 fc_conf->mode = RTE_FC_NONE;
3493         }
3494
3495         return 0;
3496 }
3497
3498 static int
3499 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3500 {
3501         uint32_t mflcn_reg, fctrl_reg, reg;
3502         uint32_t max_high_water;
3503         uint8_t i, aq_failure;
3504         int err;
3505         struct i40e_hw *hw;
3506         struct i40e_pf *pf;
3507         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3508                 [RTE_FC_NONE] = I40E_FC_NONE,
3509                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3510                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3511                 [RTE_FC_FULL] = I40E_FC_FULL
3512         };
3513
3514         /* The high_water field in rte_eth_fc_conf is in kilobytes */
3515
3516         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3517         if ((fc_conf->high_water > max_high_water) ||
3518                         (fc_conf->high_water < fc_conf->low_water)) {
3519                 PMD_INIT_LOG(ERR,
3520                         "Invalid high/low water setup value in KB; high_water must be <= %d.",
3521                         max_high_water);
3522                 return -EINVAL;
3523         }
3524
3525         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3526         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3527         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3528
3529         pf->fc_conf.pause_time = fc_conf->pause_time;
3530         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3531         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3532
3533         PMD_INIT_FUNC_TRACE();
3534
3535         /* All the link-flow-control-related enable/disable register
3536          * configuration is handled by the firmware.
3537          */
3538         err = i40e_set_fc(hw, &aq_failure, true);
3539         if (err < 0)
3540                 return -ENOSYS;
3541
3542         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3543                 /* Configure the flow control refresh threshold;
3544                  * the value for stat_tx_pause_refresh_timer[8]
3545                  * is used for global pause operation.
3546                  */
3547
3548                 I40E_WRITE_REG(hw,
3549                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3550                                pf->fc_conf.pause_time);
3551
3552                 /* Configure the timer value included in transmitted
3553                  * pause frames;
3554                  * the value for stat_tx_pause_quanta[8] is used for global
3555                  * pause operation.
3556                  */
3557                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3558                                pf->fc_conf.pause_time);
3559
3560                 fctrl_reg = I40E_READ_REG(hw,
3561                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3562
3563                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3564                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3565                 else
3566                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3567
3568                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3569                                fctrl_reg);
3570         } else {
3571                 /* Configure pause time (2 TCs per register) */
3572                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3573                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3574                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3575
3576                 /* Configure flow control refresh threshold value */
3577                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3578                                pf->fc_conf.pause_time / 2);
3579
3580                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3581
3582                 /* Set or clear the MFLCN.PMCF & MFLCN.DPF bits
3583                  * depending on the configuration
3584                  */
3585                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
3586                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3587                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3588                 } else {
3589                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3590                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3591                 }
3592
3593                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3594         }
3595
3596         if (!pf->support_multi_driver) {
3597                 /* Configure water marks based on both packets and bytes */
3598                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
3599                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3600                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3601                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
3602                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3603                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3604                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
3605                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3606                                   << I40E_KILOSHIFT);
3607                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
3608                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3609                                    << I40E_KILOSHIFT);
3610                 i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
3611         } else {
3612                 PMD_DRV_LOG(ERR,
3613                             "Water mark configuration is not supported.");
3614         }
3615
3616         I40E_WRITE_FLUSH(hw);
3617
3618         return 0;
3619 }
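
/*
 * Illustrative sketch, not driver code: a get/modify/set sequence through
 * the generic flow control API. The values must pass the range check in
 * i40e_flow_ctrl_set() above (high_water <= I40E_RXPBSIZE >>
 * I40E_KILOSHIFT, in KB, and >= low_water); the numbers here are
 * assumptions for the example.
 */
static __rte_unused int
i40e_example_enable_full_fc(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	fc_conf.mode = RTE_FC_FULL;
	fc_conf.high_water = 576;	/* in KB */
	fc_conf.low_water = 512;	/* in KB */
	fc_conf.pause_time = 0xFFFF;

	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}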
3620
3621 static int
3622 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3623                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3624 {
3625         PMD_INIT_FUNC_TRACE();
3626
3627         return -ENOSYS;
3628 }
3629
3630 /* Add a MAC address, and update filters */
3631 static int
3632 i40e_macaddr_add(struct rte_eth_dev *dev,
3633                  struct ether_addr *mac_addr,
3634                  __rte_unused uint32_t index,
3635                  uint32_t pool)
3636 {
3637         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3638         struct i40e_mac_filter_info mac_filter;
3639         struct i40e_vsi *vsi;
3640         int ret;
3641
3642         /* If VMDQ is not enabled or configured, return */
3643         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
3644                           !pf->nb_cfg_vmdq_vsi)) {
3645                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
3646                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
3647                         pool);
3648                 return -ENOTSUP;
3649         }
3650
3651         if (pool > pf->nb_cfg_vmdq_vsi) {
3652                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3653                                 pool, pf->nb_cfg_vmdq_vsi);
3654                 return -EINVAL;
3655         }
3656
3657         rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3658         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3659                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3660         else
3661                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3662
3663         if (pool == 0)
3664                 vsi = pf->main_vsi;
3665         else
3666                 vsi = pf->vmdq[pool - 1].vsi;
3667
3668         ret = i40e_vsi_add_mac(vsi, &mac_filter);
3669         if (ret != I40E_SUCCESS) {
3670                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3671                 return -ENODEV;
3672         }
3673         return 0;
3674 }
3675
3676 /* Remove a MAC address, and update filters */
3677 static void
3678 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3679 {
3680         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3681         struct i40e_vsi *vsi;
3682         struct rte_eth_dev_data *data = dev->data;
3683         struct ether_addr *macaddr;
3684         int ret;
3685         uint32_t i;
3686         uint64_t pool_sel;
3687
3688         macaddr = &(data->mac_addrs[index]);
3689
3690         pool_sel = dev->data->mac_pool_sel[index];
3691
3692         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3693                 if (pool_sel & (1ULL << i)) {
3694                         if (i == 0)
3695                                 vsi = pf->main_vsi;
3696                         else {
3697                                 /* No VMDQ pool enabled or configured */
3698                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3699                                         (i > pf->nb_cfg_vmdq_vsi)) {
3700                                         PMD_DRV_LOG(ERR,
3701                                                 "No VMDQ pool enabled/configured");
3702                                         return;
3703                                 }
3704                                 vsi = pf->vmdq[i - 1].vsi;
3705                         }
3706                         ret = i40e_vsi_delete_mac(vsi, macaddr);
3707
3708                         if (ret) {
3709                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3710                                 return;
3711                         }
3712                 }
3713         }
3714 }
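
/*
 * Illustrative sketch, not driver code: adding the same address to two
 * pools via the generic API sets two bits in mac_pool_sel[index], which
 * the removal loop above walks. The port_id and the locally administered
 * address are assumptions for the example.
 */
static __rte_unused int
i40e_example_add_mac_to_pools(uint16_t port_id)
{
	struct ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};
	int ret;

	/* Pool 0 is the main VSI; pool N (N > 0) maps to pf->vmdq[N - 1] */
	ret = rte_eth_dev_mac_addr_add(port_id, &addr, 0);
	if (ret != 0)
		return ret;

	return rte_eth_dev_mac_addr_add(port_id, &addr, 1);
}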
3715
3716 /* Set perfect match or hash match of MAC and VLAN for a VF */
3717 static int
3718 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3719                  struct rte_eth_mac_filter *filter,
3720                  bool add)
3721 {
3722         struct i40e_hw *hw;
3723         struct i40e_mac_filter_info mac_filter;
3724         struct ether_addr old_mac;
3725         struct ether_addr *new_mac;
3726         struct i40e_pf_vf *vf = NULL;
3727         uint16_t vf_id;
3728         int ret;
3729
3730         if (pf == NULL) {
3731                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3732                 return -EINVAL;
3733         }
3734         hw = I40E_PF_TO_HW(pf);
3735
3736         if (filter == NULL) {
3737                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3738                 return -EINVAL;
3739         }
3740
3741         new_mac = &filter->mac_addr;
3742
3743         if (is_zero_ether_addr(new_mac)) {
3744                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3745                 return -EINVAL;
3746         }
3747
3748         vf_id = filter->dst_id;
3749
3750         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
3751                 PMD_DRV_LOG(ERR, "Invalid argument.");
3752                 return -EINVAL;
3753         }
3754         vf = &pf->vfs[vf_id];
3755
3756         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3757                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3758                 return -EINVAL;
3759         }
3760
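             /* hw->mac.addr is kept in sync with the filter change below: it
              * is set to the new MAC on add and restored to the permanent
              * MAC on delete.
              */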
3761         if (add) {
3762                 rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3763                 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3764                                 ETHER_ADDR_LEN);
3765                 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3766                                  ETHER_ADDR_LEN);
3767
3768                 mac_filter.filter_type = filter->filter_type;
3769                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3770                 if (ret != I40E_SUCCESS) {
3771                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3772                         return -1;
3773                 }
3774                 ether_addr_copy(new_mac, &pf->dev_addr);
3775         } else {
3776                 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3777                                 ETHER_ADDR_LEN);
3778                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3779                 if (ret != I40E_SUCCESS) {
3780                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3781                         return -1;
3782                 }
3783
3784                 /* Clear device address as it has been removed */
3785                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3786                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3787         }
3788
3789         return 0;
3790 }
3791
3792 /* MAC filter handle */
3793 static int
3794 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3795                 void *arg)
3796 {
3797         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3798         struct rte_eth_mac_filter *filter;
3799         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3800         int ret = I40E_NOT_SUPPORTED;
3801
3802         filter = (struct rte_eth_mac_filter *)(arg);
3803
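             /* PF IRQ0 (the misc/adminq interrupt) is masked while the
              * filter admin queue commands are issued, then unmasked again.
              */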
3804         switch (filter_op) {
3805         case RTE_ETH_FILTER_NOP:
3806                 ret = I40E_SUCCESS;
3807                 break;
3808         case RTE_ETH_FILTER_ADD:
3809                 i40e_pf_disable_irq0(hw);
3810                 if (filter->is_vf)
3811                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
3812                 i40e_pf_enable_irq0(hw);
3813                 break;
3814         case RTE_ETH_FILTER_DELETE:
3815                 i40e_pf_disable_irq0(hw);
3816                 if (filter->is_vf)
3817                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
3818                 i40e_pf_enable_irq0(hw);
3819                 break;
3820         default:
3821                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3822                 ret = I40E_ERR_PARAM;
3823                 break;
3824         }
3825
3826         return ret;
3827 }
3828
3829 static int
3830 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3831 {
3832         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3833         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3834         uint32_t reg;
3835         int ret;
3836
3837         if (!lut)
3838                 return -EINVAL;
3839
3840         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3841                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
3842                                           lut, lut_size);
3843                 if (ret) {
3844                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3845                         return ret;
3846                 }
3847         } else {
3848                 uint32_t *lut_dw = (uint32_t *)lut;
3849                 uint16_t i, lut_size_dw = lut_size / 4;
3850
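                     /* The LUT registers are 32 bits wide, so the byte-wise
                      * LUT buffer is accessed four entries at a time.
                      */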
3851                 if (vsi->type == I40E_VSI_SRIOV) {
3852                         for (i = 0; i < lut_size_dw; i++) {
3853                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
3854                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
3855                         }
3856                 } else {
3857                         for (i = 0; i < lut_size_dw; i++)
3858                                 lut_dw[i] = I40E_READ_REG(hw,
3859                                                           I40E_PFQF_HLUT(i));
3860                 }
3861         }
3862
3863         return 0;
3864 }
3865
3866 int
3867 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3868 {
3869         struct i40e_pf *pf;
3870         struct i40e_hw *hw;
3871         int ret;
3872
3873         if (!vsi || !lut)
3874                 return -EINVAL;
3875
3876         pf = I40E_VSI_TO_PF(vsi);
3877         hw = I40E_VSI_TO_HW(vsi);
3878
3879         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3880                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
3881                                           lut, lut_size);
3882                 if (ret) {
3883                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3884                         return ret;
3885                 }
3886         } else {
3887                 uint32_t *lut_dw = (uint32_t *)lut;
3888                 uint16_t i, lut_size_dw = lut_size / 4;
3889
3890                 if (vsi->type == I40E_VSI_SRIOV) {
3891                         for (i = 0; i < lut_size_dw; i++)
3892                                 I40E_WRITE_REG(
3893                                         hw,
3894                                         I40E_VFQF_HLUT1(i, vsi->user_param),
3895                                         lut_dw[i]);
3896                 } else {
3897                         for (i = 0; i < lut_size_dw; i++)
3898                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
3899                                                lut_dw[i]);
3900                 }
3901                 I40E_WRITE_FLUSH(hw);
3902         }
3903
3904         return 0;
3905 }
3906
3907 static int
3908 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
3909                          struct rte_eth_rss_reta_entry64 *reta_conf,
3910                          uint16_t reta_size)
3911 {
3912         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3913         uint16_t i, lut_size = pf->hash_lut_size;
3914         uint16_t idx, shift;
3915         uint8_t *lut;
3916         int ret;
3917
3918         if (reta_size != lut_size ||
3919                 reta_size > ETH_RSS_RETA_SIZE_512) {
3920                 PMD_DRV_LOG(ERR,
3921                         "The size of the configured hash lookup table (%d) doesn't match the size the hardware supports (%d)",
3922                         reta_size, lut_size);
3923                 return -EINVAL;
3924         }
3925
3926         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3927         if (!lut) {
3928                 PMD_DRV_LOG(ERR, "Failed to allocate memory for RSS LUT");
3929                 return -ENOMEM;
3930         }
3931         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3932         if (ret)
3933                 goto out;
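             /* Read back the whole current LUT, overlay only the entries
              * selected by the reta_conf mask bits, then write it all back.
              */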
3934         for (i = 0; i < reta_size; i++) {
3935                 idx = i / RTE_RETA_GROUP_SIZE;
3936                 shift = i % RTE_RETA_GROUP_SIZE;
3937                 if (reta_conf[idx].mask & (1ULL << shift))
3938                         lut[i] = reta_conf[idx].reta[shift];
3939         }
3940         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
3941
3942 out:
3943         rte_free(lut);
3944
3945         return ret;
3946 }
3947
3948 static int
3949 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
3950                         struct rte_eth_rss_reta_entry64 *reta_conf,
3951                         uint16_t reta_size)
3952 {
3953         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3954         uint16_t i, lut_size = pf->hash_lut_size;
3955         uint16_t idx, shift;
3956         uint8_t *lut;
3957         int ret;
3958
3959         if (reta_size != lut_size ||
3960                 reta_size > ETH_RSS_RETA_SIZE_512) {
3961                 PMD_DRV_LOG(ERR,
3962                         "The size of the configured hash lookup table (%d) doesn't match the size the hardware supports (%d)",
3963                         reta_size, lut_size);
3964                 return -EINVAL;
3965         }
3966
3967         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3968         if (!lut) {
3969                 PMD_DRV_LOG(ERR, "Failed to allocate memory for RSS LUT");
3970                 return -ENOMEM;
3971         }
3972
3973         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3974         if (ret)
3975                 goto out;
3976         for (i = 0; i < reta_size; i++) {
3977                 idx = i / RTE_RETA_GROUP_SIZE;
3978                 shift = i % RTE_RETA_GROUP_SIZE;
3979                 if (reta_conf[idx].mask & (1ULL << shift))
3980                         reta_conf[idx].reta[shift] = lut[i];
3981         }
3982
3983 out:
3984         rte_free(lut);
3985
3986         return ret;
3987 }
3988
3989 /**
3990  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
3991  * @hw:   pointer to the HW structure
3992  * @mem:  pointer to mem struct to fill out
3993  * @size: size of memory requested
3994  * @alignment: what to align the allocation to
3995  **/
3996 enum i40e_status_code
3997 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3998                         struct i40e_dma_mem *mem,
3999                         u64 size,
4000                         u32 alignment)
4001 {
4002         const struct rte_memzone *mz = NULL;
4003         char z_name[RTE_MEMZONE_NAMESIZE];
4004
4005         if (!mem)
4006                 return I40E_ERR_PARAM;
4007
4008         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
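             /* The bounded reservation keeps the zone from crossing a 2M
              * page boundary, so it stays within a single hugepage.
              */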
4009         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
4010                                          alignment, RTE_PGSIZE_2M);
4011         if (!mz)
4012                 return I40E_ERR_NO_MEMORY;
4013
4014         mem->size = size;
4015         mem->va = mz->addr;
4016         mem->pa = mz->iova;
4017         mem->zone = (const void *)mz;
4018         PMD_DRV_LOG(DEBUG,
4019                 "memzone %s allocated with physical address: %"PRIu64,
4020                 mz->name, mem->pa);
4021
4022         return I40E_SUCCESS;
4023 }
4024
4025 /**
4026  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4027  * @hw:   pointer to the HW structure
4028  * @mem:  ptr to mem struct to free
4029  **/
4030 enum i40e_status_code
4031 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4032                     struct i40e_dma_mem *mem)
4033 {
4034         if (!mem)
4035                 return I40E_ERR_PARAM;
4036
4037         PMD_DRV_LOG(DEBUG,
4038                 "memzone %s to be freed with physical address: %"PRIu64,
4039                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4040         rte_memzone_free((const struct rte_memzone *)mem->zone);
4041         mem->zone = NULL;
4042         mem->va = NULL;
4043         mem->pa = (u64)0;
4044
4045         return I40E_SUCCESS;
4046 }
4047
4048 /**
4049  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4050  * @hw:   pointer to the HW structure
4051  * @mem:  pointer to mem struct to fill out
4052  * @size: size of memory requested
4053  **/
4054 enum i40e_status_code
4055 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4056                          struct i40e_virt_mem *mem,
4057                          u32 size)
4058 {
4059         if (!mem)
4060                 return I40E_ERR_PARAM;
4061
4062         mem->size = size;
4063         mem->va = rte_zmalloc("i40e", size, 0);
4064
4065         if (mem->va)
4066                 return I40E_SUCCESS;
4067         else
4068                 return I40E_ERR_NO_MEMORY;
4069 }
4070
4071 /**
4072  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4073  * @hw:   pointer to the HW structure
4074  * @mem:  pointer to mem struct to free
4075  **/
4076 enum i40e_status_code
4077 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4078                      struct i40e_virt_mem *mem)
4079 {
4080         if (!mem)
4081                 return I40E_ERR_PARAM;
4082
4083         rte_free(mem->va);
4084         mem->va = NULL;
4085
4086         return I40E_SUCCESS;
4087 }
4088
4089 void
4090 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4091 {
4092         rte_spinlock_init(&sp->spinlock);
4093 }
4094
4095 void
4096 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4097 {
4098         rte_spinlock_lock(&sp->spinlock);
4099 }
4100
4101 void
4102 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4103 {
4104         rte_spinlock_unlock(&sp->spinlock);
4105 }
4106
4107 void
4108 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
4109 {
4110         return;
4111 }
4112
4113 /**
4114  * Get the hardware capabilities, which will be parsed
4115  * and saved into struct i40e_hw.
4116  */
4117 static int
4118 i40e_get_cap(struct i40e_hw *hw)
4119 {
4120         struct i40e_aqc_list_capabilities_element_resp *buf;
4121         uint16_t len, size = 0;
4122         int ret;
4123
4124         /* Calculate a buffer size large enough to hold the response data temporarily */
4125         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4126                                                 I40E_MAX_CAP_ELE_NUM;
4127         buf = rte_zmalloc("i40e", len, 0);
4128         if (!buf) {
4129                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4130                 return I40E_ERR_NO_MEMORY;
4131         }
4132
4133         /* Get and parse the capabilities, then save them into hw */
4134         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4135                         i40e_aqc_opc_list_func_capabilities, NULL);
4136         if (ret != I40E_SUCCESS)
4137                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4138
4139         /* Free the temporary buffer after being used */
4140         rte_free(buf);
4141
4142         return ret;
4143 }
4144
4145 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4146 #define QUEUE_NUM_PER_VF_ARG                    "queue-num-per-vf"
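     /* The maximum queue number per VF can be tuned at startup through
      * devargs, e.g. (the PCI address below is just an example):
      *     -w 0000:02:00.0,queue-num-per-vf=8
      */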
4147
4148 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4149                 const char *value,
4150                 void *opaque)
4151 {
4152         struct i40e_pf *pf;
4153         unsigned long num;
4154         char *end;
4155
4156         pf = (struct i40e_pf *)opaque;
4157         RTE_SET_USED(key);
4158
4159         errno = 0;
4160         num = strtoul(value, &end, 0);
4161         if (errno != 0 || end == value || *end != 0) {
4162                 PMD_DRV_LOG(WARNING, "Invalid VF queue number \"%s\", "
4163                             "keeping the current value %hu", value, pf->vf_nb_qp_max);
4164                 return -(EINVAL);
4165         }
4166
4167         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4168                 pf->vf_nb_qp_max = (uint16_t)num;
4169         else
4170                 /* Return 0 so a later valid occurrence of the same argument still works */
4171                 PMD_DRV_LOG(WARNING, "Invalid VF queue number %lu, it must be "
4172                             "a power of 2 no greater than 16; keeping the "
4173                             "current value %hu", num, pf->vf_nb_qp_max);
4174
4175         return 0;
4176 }
4177
4178 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4179 {
4180         static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
4181         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4182         struct rte_kvargs *kvlist;
4183
4184         /* set default queue number per VF as 4 */
4185         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4186
4187         if (dev->device->devargs == NULL)
4188                 return 0;
4189
4190         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4191         if (kvlist == NULL)
4192                 return -(EINVAL);
4193
4194         if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
4195                 PMD_DRV_LOG(WARNING, "More than one \"%s\" argument given; only "
4196                             "the first invalid or the last valid one takes effect",
4197                             QUEUE_NUM_PER_VF_ARG);
4198
4199         rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
4200                            i40e_pf_parse_vf_queue_number_handler, pf);
4201
4202         rte_kvargs_free(kvlist);
4203
4204         return 0;
4205 }
4206
4207 static int
4208 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4209 {
4210         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4211         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4212         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4213         uint16_t qp_count = 0, vsi_count = 0;
4214
4215         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4216                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4217                 return -EINVAL;
4218         }
4219
4220         i40e_pf_config_vf_rxq_number(dev);
4221
4222         /* Initialize the link flow control (LFC) parameters */
4223         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4224         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4225         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4226
4227         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4228         pf->max_num_vsi = hw->func_caps.num_vsis;
4229         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4230         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4231
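             /* Queue pairs are carved from one contiguous range in the order
              * FDIR | LAN | VF | VMDq; each *_qp_offset below marks where
              * the corresponding block starts.
              */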
4232         /* FDir queue/VSI allocation */
4233         pf->fdir_qp_offset = 0;
4234         if (hw->func_caps.fd) {
4235                 pf->flags |= I40E_FLAG_FDIR;
4236                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4237         } else {
4238                 pf->fdir_nb_qps = 0;
4239         }
4240         qp_count += pf->fdir_nb_qps;
4241         vsi_count += 1;
4242
4243         /* LAN queue/VSI allocation */
4244         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4245         if (!hw->func_caps.rss) {
4246                 pf->lan_nb_qps = 1;
4247         } else {
4248                 pf->flags |= I40E_FLAG_RSS;
4249                 if (hw->mac.type == I40E_MAC_X722)
4250                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4251                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4252         }
4253         qp_count += pf->lan_nb_qps;
4254         vsi_count += 1;
4255
4256         /* VF queue/VSI allocation */
4257         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4258         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4259                 pf->flags |= I40E_FLAG_SRIOV;
4260                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4261                 pf->vf_num = pci_dev->max_vfs;
4262                 PMD_DRV_LOG(DEBUG,
4263                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4264                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4265         } else {
4266                 pf->vf_nb_qps = 0;
4267                 pf->vf_num = 0;
4268         }
4269         qp_count += pf->vf_nb_qps * pf->vf_num;
4270         vsi_count += pf->vf_num;
4271
4272         /* VMDq queue/VSI allocation */
4273         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4274         pf->vmdq_nb_qps = 0;
4275         pf->max_nb_vmdq_vsi = 0;
4276         if (hw->func_caps.vmdq) {
4277                 if (qp_count < hw->func_caps.num_tx_qp &&
4278                         vsi_count < hw->func_caps.num_vsis) {
4279                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4280                                 qp_count) / pf->vmdq_nb_qp_max;
4281
4282                         /* Limit the maximum number of VMDq vsi to the maximum
4283                          * ethdev can support
4284                          */
4285                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4286                                 hw->func_caps.num_vsis - vsi_count);
4287                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4288                                 ETH_64_POOLS);
4289                         if (pf->max_nb_vmdq_vsi) {
4290                                 pf->flags |= I40E_FLAG_VMDQ;
4291                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4292                                 PMD_DRV_LOG(DEBUG,
4293                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4294                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4295                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4296                         } else {
4297                                 PMD_DRV_LOG(INFO,
4298                                         "Not enough queues left for VMDq");
4299                         }
4300                 } else {
4301                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4302                 }
4303         }
4304         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4305         vsi_count += pf->max_nb_vmdq_vsi;
4306
4307         if (hw->func_caps.dcb)
4308                 pf->flags |= I40E_FLAG_DCB;
4309
4310         if (qp_count > hw->func_caps.num_tx_qp) {
4311                 PMD_DRV_LOG(ERR,
4312                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4313                         qp_count, hw->func_caps.num_tx_qp);
4314                 return -EINVAL;
4315         }
4316         if (vsi_count > hw->func_caps.num_vsis) {
4317                 PMD_DRV_LOG(ERR,
4318                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4319                         vsi_count, hw->func_caps.num_vsis);
4320                 return -EINVAL;
4321         }
4322
4323         return 0;
4324 }
4325
4326 static int
4327 i40e_pf_get_switch_config(struct i40e_pf *pf)
4328 {
4329         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4330         struct i40e_aqc_get_switch_config_resp *switch_config;
4331         struct i40e_aqc_switch_config_element_resp *element;
4332         uint16_t start_seid = 0, num_reported;
4333         int ret;
4334
4335         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4336                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4337         if (!switch_config) {
4338                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4339                 return -ENOMEM;
4340         }
4341
4342         /* Get the switch configurations */
4343         ret = i40e_aq_get_switch_config(hw, switch_config,
4344                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4345         if (ret != I40E_SUCCESS) {
4346                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4347                 goto fail;
4348         }
4349         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4350         if (num_reported != 1) { /* The number should be 1 */
4351                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4352                 ret = -EINVAL;
4353                 goto fail;
4354         }
4354
4355         /* Parse the switch configuration elements */
4356         element = &(switch_config->element[0]);
4357         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4358                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4359                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4360         } else
4361                 PMD_DRV_LOG(INFO, "Unknown element type");
4362
4363 fail:
4364         rte_free(switch_config);
4365
4366         return ret;
4367 }
4368
4369 static int
4370 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4371                         uint32_t num)
4372 {
4373         struct pool_entry *entry;
4374
4375         if (pool == NULL || num == 0)
4376                 return -EINVAL;
4377
4378         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4379         if (entry == NULL) {
4380                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4381                 return -ENOMEM;
4382         }
4383
4384         /* Initialize the queue heap */
4385         pool->num_free = num;
4386         pool->num_alloc = 0;
4387         pool->base = base;
4388         LIST_INIT(&pool->alloc_list);
4389         LIST_INIT(&pool->free_list);
4390
4391         /* Initialize the element to cover the whole range */
4392         entry->base = 0;
4393         entry->len = num;
4394
4395         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4396         return 0;
4397 }
4398
4399 static void
4400 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4401 {
4402         struct pool_entry *entry, *next_entry;
4403
4404         if (pool == NULL)
4405                 return;
4406
4407         for (entry = LIST_FIRST(&pool->alloc_list);
4408                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4409                         entry = next_entry) {
4410                 LIST_REMOVE(entry, next);
4411                 rte_free(entry);
4412         }
4413
4414         for (entry = LIST_FIRST(&pool->free_list);
4415                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4416                         entry = next_entry) {
4417                 LIST_REMOVE(entry, next);
4418                 rte_free(entry);
4419         }
4420
4421         pool->num_free = 0;
4422         pool->num_alloc = 0;
4423         pool->base = 0;
4424         LIST_INIT(&pool->alloc_list);
4425         LIST_INIT(&pool->free_list);
4426 }
4427
4428 static int
4429 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4430                        uint32_t base)
4431 {
4432         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4433         uint32_t pool_offset;
4434         int insert;
4435
4436         if (pool == NULL) {
4437                 PMD_DRV_LOG(ERR, "Invalid parameter");
4438                 return -EINVAL;
4439         }
4440
4441         pool_offset = base - pool->base;
4442         /* Lookup in alloc list */
4443         LIST_FOREACH(entry, &pool->alloc_list, next) {
4444                 if (entry->base == pool_offset) {
4445                         valid_entry = entry;
4446                         LIST_REMOVE(entry, next);
4447                         break;
4448                 }
4449         }
4450
4451         /* Not found, return */
4452         if (valid_entry == NULL) {
4453                 PMD_DRV_LOG(ERR, "Failed to find entry");
4454                 return -EINVAL;
4455         }
4456
4457         /**
4458          * Found it; move it to the free list and try to merge.
4459          * The free list is kept sorted by base to make merging easier.
4460          * Find the adjacent prev and next entries.
4461          */
4462         prev = next = NULL;
4463         LIST_FOREACH(entry, &pool->free_list, next) {
4464                 if (entry->base > valid_entry->base) {
4465                         next = entry;
4466                         break;
4467                 }
4468                 prev = entry;
4469         }
4470
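             /* Three outcomes are possible: the entry merges into next,
              * merges into prev (possibly collapsing all three into one
              * node), or no neighbour is adjacent and it is inserted at its
              * sorted position.
              */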
4471         insert = 0;
4472         /* Try to merge with the next entry */
4473         if (next != NULL) {
4474                 /* Merge if adjacent to the next entry */
4475                 if (valid_entry->base + valid_entry->len == next->base) {
4476                         next->base = valid_entry->base;
4477                         next->len += valid_entry->len;
4478                         rte_free(valid_entry);
4479                         valid_entry = next;
4480                         insert = 1;
4481                 }
4482         }
4483
4484         if (prev != NULL) {
4485                 /* Merge with previous one */
4486                 if (prev->base + prev->len == valid_entry->base) {
4487                         prev->len += valid_entry->len;
4488                         /* If already merged with the next entry, remove that node */
4489                         if (insert == 1) {
4490                                 LIST_REMOVE(valid_entry, next);
4491                                 rte_free(valid_entry);
4492                         } else {
4493                                 rte_free(valid_entry);
4494                                 insert = 1;
4495                         }
4496                 }
4497         }
4498
4499         /* Nothing was merged, insert the entry */
4500         if (insert == 0) {
4501                 if (prev != NULL)
4502                         LIST_INSERT_AFTER(prev, valid_entry, next);
4503                 else if (next != NULL)
4504                         LIST_INSERT_BEFORE(next, valid_entry, next);
4505                 else /* It's empty list, insert to head */
4506                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4507         }
4508
4509         pool->num_free += valid_entry->len;
4510         pool->num_alloc -= valid_entry->len;
4511
4512         return 0;
4513 }
4514
4515 static int
4516 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4517                        uint16_t num)
4518 {
4519         struct pool_entry *entry, *valid_entry;
4520
4521         if (pool == NULL || num == 0) {
4522                 PMD_DRV_LOG(ERR, "Invalid parameter");
4523                 return -EINVAL;
4524         }
4525
4526         if (pool->num_free < num) {
4527                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
4528                             num, pool->num_free);
4529                 return -ENOMEM;
4530         }
4531
4532         valid_entry = NULL;
4533         /* Look through the free list and find the best-fit entry */
4534         LIST_FOREACH(entry, &pool->free_list, next) {
4535                 if (entry->len >= num) {
4536                         /* An exact fit is the best one */
4537                         if (entry->len == num) {
4538                                 valid_entry = entry;
4539                                 break;
4540                         }
4541                         if (valid_entry == NULL || valid_entry->len > entry->len)
4542                                 valid_entry = entry;
4543                 }
4544         }
4545
4546         /* No entry satisfies the request, return */
4547         if (valid_entry == NULL) {
4548                 PMD_DRV_LOG(ERR, "No valid entry found");
4549                 return -ENOMEM;
4550         }
4551         /**
4552          * The entry has exactly as many queues as requested,
4553          * remove it from the free list.
4554          */
4555         if (valid_entry->len == num) {
4556                 LIST_REMOVE(valid_entry, next);
4557         } else {
4558                 /**
4559                  * The entry has more queues than requested,
4560                  * create a new entry for the alloc list and shrink
4561                  * the base/length of the entry left in the free list.
4562                  */
4563                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4564                 if (entry == NULL) {
4565                         PMD_DRV_LOG(ERR,
4566                                 "Failed to allocate memory for resource pool");
4567                         return -ENOMEM;
4568                 }
4569                 entry->base = valid_entry->base;
4570                 entry->len = num;
4571                 valid_entry->base += num;
4572                 valid_entry->len -= num;
4573                 valid_entry = entry;
4574         }
4575
4576         /* Insert it into alloc list, not sorted */
4577         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4578
4579         pool->num_free -= valid_entry->len;
4580         pool->num_alloc += valid_entry->len;
4581
4582         return valid_entry->base + pool->base;
4583 }
4584
4585 /**
4586  * bitmap_is_subset - Check whether src2 is subset of src1
4587  **/
4588 static inline int
4589 bitmap_is_subset(uint8_t src1, uint8_t src2)
4590 {
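             /* src1 ^ src2 has a bit set wherever the inputs differ; masking
              * with src2 keeps only the bits set in src2 but missing from
              * src1, so the result is 0 exactly when src2 is a subset of
              * src1. E.g. src1 = 0x7: src2 = 0x5 is a subset, 0x8 is not.
              */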
4591         return !((src1 ^ src2) & src2);
4592 }
4593
4594 static enum i40e_status_code
4595 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4596 {
4597         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4598
4599         /* If DCB is not supported, only default TC is supported */
4600         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4601                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
4602                 return I40E_NOT_SUPPORTED;
4603         }
4604
4605         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4606                 PMD_DRV_LOG(ERR,
4607                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
4608                         hw->func_caps.enabled_tcmap, enabled_tcmap);
4609                 return I40E_NOT_SUPPORTED;
4610         }
4611         return I40E_SUCCESS;
4612 }
4613
4614 int
4615 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4616                                 struct i40e_vsi_vlan_pvid_info *info)
4617 {
4618         struct i40e_hw *hw;
4619         struct i40e_vsi_context ctxt;
4620         uint8_t vlan_flags = 0;
4621         int ret;
4622
4623         if (vsi == NULL || info == NULL) {
4624                 PMD_DRV_LOG(ERR, "invalid parameters");
4625                 return I40E_ERR_PARAM;
4626         }
4627
4628         if (info->on) {
4629                 vsi->info.pvid = info->config.pvid;
4630                 /**
4631                  * If insert pvid is enabled, only tagged pkts are
4632                  * allowed to be sent out.
4633                  */
4634                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
4635                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4636         } else {
4637                 vsi->info.pvid = 0;
4638                 if (info->config.reject.tagged == 0)
4639                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4640
4641                 if (info->config.reject.untagged == 0)
4642                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
4643         }
4644         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
4645                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
4646         vsi->info.port_vlan_flags |= vlan_flags;
4647         vsi->info.valid_sections =
4648                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4649         memset(&ctxt, 0, sizeof(ctxt));
4650         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4651         ctxt.seid = vsi->seid;
4652
4653         hw = I40E_VSI_TO_HW(vsi);
4654         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4655         if (ret != I40E_SUCCESS)
4656                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
4657
4658         return ret;
4659 }
4660
4661 static int
4662 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4663 {
4664         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4665         int i, ret;
4666         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
4667
4668         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4669         if (ret != I40E_SUCCESS)
4670                 return ret;
4671
4672         if (!vsi->seid) {
4673                 PMD_DRV_LOG(ERR, "seid not valid");
4674                 return -EINVAL;
4675         }
4676
4677         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
4678         tc_bw_data.tc_valid_bits = enabled_tcmap;
4679         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4680                 tc_bw_data.tc_bw_credits[i] =
4681                         (enabled_tcmap & (1 << i)) ? 1 : 0;
4682
4683         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
4684         if (ret != I40E_SUCCESS) {
4685                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
4686                 return ret;
4687         }
4688
4689         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
4690                                         sizeof(vsi->info.qs_handle));
4691         return I40E_SUCCESS;
4692 }
4693
4694 static enum i40e_status_code
4695 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
4696                                  struct i40e_aqc_vsi_properties_data *info,
4697                                  uint8_t enabled_tcmap)
4698 {
4699         enum i40e_status_code ret;
4700         int i, total_tc = 0;
4701         uint16_t qpnum_per_tc, bsf, qp_idx;
4702
4703         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4704         if (ret != I40E_SUCCESS)
4705                 return ret;
4706
4707         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4708                 if (enabled_tcmap & (1 << i))
4709                         total_tc++;
4710         if (total_tc == 0)
4711                 total_tc = 1;
4712         vsi->enabled_tc = enabled_tcmap;
4713
4714         /* Number of queues per enabled TC */
4715         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
4716         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
4717         bsf = rte_bsf32(qpnum_per_tc);
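             /* qpnum_per_tc is a power of two here, so rte_bsf32() yields
              * its log2; the per-TC queue count is encoded below as this
              * exponent in the TC mapping.
              */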
4718
4719         /* Adjust the queue number to actual queues that can be applied */
4720         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4721                 vsi->nb_qps = qpnum_per_tc * total_tc;
4722
4723         /**
4724          * Configure TC and queue mapping parameters. Each enabled TC
4725          * gets qpnum_per_tc queues; disabled TCs are served by the
4726          * default queue.
4727          */
4728         qp_idx = 0;
4729         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4730                 if (vsi->enabled_tc & (1 << i)) {
4731                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4732                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4733                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4734                         qp_idx += qpnum_per_tc;
4735                 } else
4736                         info->tc_mapping[i] = 0;
4737         }
4738
4739         /* Associate queue number with VSI */
4740         if (vsi->type == I40E_VSI_SRIOV) {
4741                 info->mapping_flags |=
4742                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4743                 for (i = 0; i < vsi->nb_qps; i++)
4744                         info->queue_mapping[i] =
4745                                 rte_cpu_to_le_16(vsi->base_queue + i);
4746         } else {
4747                 info->mapping_flags |=
4748                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4749                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4750         }
4751         info->valid_sections |=
4752                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4753
4754         return I40E_SUCCESS;
4755 }
4756
4757 static int
4758 i40e_veb_release(struct i40e_veb *veb)
4759 {
4760         struct i40e_vsi *vsi;
4761         struct i40e_hw *hw;
4762
4763         if (veb == NULL)
4764                 return -EINVAL;
4765
4766         if (!TAILQ_EMPTY(&veb->head)) {
4767                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4768                 return -EACCES;
4769         }
4770         /* associate_vsi field is NULL for floating VEB */
4771         if (veb->associate_vsi != NULL) {
4772                 vsi = veb->associate_vsi;
4773                 hw = I40E_VSI_TO_HW(vsi);
4774
4775                 vsi->uplink_seid = veb->uplink_seid;
4776                 vsi->veb = NULL;
4777         } else {
4778                 veb->associate_pf->main_vsi->floating_veb = NULL;
4779                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4780         }
4781
4782         i40e_aq_delete_element(hw, veb->seid, NULL);
4783         rte_free(veb);
4784         return I40E_SUCCESS;
4785 }
4786
4787 /* Setup a veb */
4788 static struct i40e_veb *
4789 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4790 {
4791         struct i40e_veb *veb;
4792         int ret;
4793         struct i40e_hw *hw;
4794
4795         if (pf == NULL) {
4796                 PMD_DRV_LOG(ERR,
4797                             "VEB setup failed, associated PF shouldn't be NULL");
4798                 return NULL;
4799         }
4800         hw = I40E_PF_TO_HW(pf);
4801
4802         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4803         if (!veb) {
4804                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4805                 goto fail;
4806         }
4807
4808         veb->associate_vsi = vsi;
4809         veb->associate_pf = pf;
4810         TAILQ_INIT(&veb->head);
4811         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4812
4813         /* create floating veb if vsi is NULL */
4814         if (vsi != NULL) {
4815                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4816                                       I40E_DEFAULT_TCMAP, false,
4817                                       &veb->seid, false, NULL);
4818         } else {
4819                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4820                                       true, &veb->seid, false, NULL);
4821         }
4822
4823         if (ret != I40E_SUCCESS) {
4824                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4825                             hw->aq.asq_last_status);
4826                 goto fail;
4827         }
4828         veb->enabled_tc = I40E_DEFAULT_TCMAP;
4829
4830         /* get statistics index */
4831         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4832                                 &veb->stats_idx, NULL, NULL, NULL);
4833         if (ret != I40E_SUCCESS) {
4834                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
4835                             hw->aq.asq_last_status);
4836                 goto fail;
4837         }
4838         /* Get VEB bandwidth, to be implemented */
4839         /* Now associated vsi binding to the VEB, set uplink to this VEB */
4840         if (vsi)
4841                 vsi->uplink_seid = veb->seid;
4842
4843         return veb;
4844 fail:
4845         rte_free(veb);
4846         return NULL;
4847 }
4848
4849 int
4850 i40e_vsi_release(struct i40e_vsi *vsi)
4851 {
4852         struct i40e_pf *pf;
4853         struct i40e_hw *hw;
4854         struct i40e_vsi_list *vsi_list;
4855         void *temp;
4856         int ret;
4857         struct i40e_mac_filter *f;
4858         uint16_t user_param;
4859
4860         if (!vsi)
4861                 return I40E_SUCCESS;
4862
4863         if (!vsi->adapter)
4864                 return -EFAULT;
4865
4866         user_param = vsi->user_param;
4867
4868         pf = I40E_VSI_TO_PF(vsi);
4869         hw = I40E_VSI_TO_HW(vsi);
4870
4871         /* VSI has children attached, release the children first */
4872         if (vsi->veb) {
4873                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
4874                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4875                                 return -1;
4876                 }
4877                 i40e_veb_release(vsi->veb);
4878         }
4879
4880         if (vsi->floating_veb) {
4881                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
4882                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4883                                 return -1;
4884                 }
4885         }
4886
4887         /* Remove all macvlan filters of the VSI */
4888         i40e_vsi_remove_all_macvlan_filter(vsi);
4889         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
4890                 rte_free(f);
4891
4892         if (vsi->type != I40E_VSI_MAIN &&
4893             ((vsi->type != I40E_VSI_SRIOV) ||
4894             !pf->floating_veb_list[user_param])) {
4895                 /* Remove vsi from parent's sibling list */
4896                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
4897                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its VEB is NULL");
4898                         return I40E_ERR_PARAM;
4899                 }
4900                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
4901                                 &vsi->sib_vsi_list, list);
4902
4903                 /* Remove all switch element of the VSI */
4904                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4905                 if (ret != I40E_SUCCESS)
4906                         PMD_DRV_LOG(ERR, "Failed to delete element");
4907         }
4908
4909         if ((vsi->type == I40E_VSI_SRIOV) &&
4910             pf->floating_veb_list[user_param]) {
4911                 /* Remove vsi from parent's sibling list */
4912                 if (vsi->parent_vsi == NULL ||
4913                     vsi->parent_vsi->floating_veb == NULL) {
4914                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its floating VEB is NULL");
4915                         return I40E_ERR_PARAM;
4916                 }
4917                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
4918                              &vsi->sib_vsi_list, list);
4919
4920                 /* Remove all switch element of the VSI */
4921                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4922                 if (ret != I40E_SUCCESS)
4923                         PMD_DRV_LOG(ERR, "Failed to delete element");
4924         }
4925
4926         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4927
4928         if (vsi->type != I40E_VSI_SRIOV)
4929                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
4930         rte_free(vsi);
4931
4932         return I40E_SUCCESS;
4933 }
4934
4935 static int
4936 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
4937 {
4938         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4939         struct i40e_aqc_remove_macvlan_element_data def_filter;
4940         struct i40e_mac_filter_info filter;
4941         int ret;
4942
4943         if (vsi->type != I40E_VSI_MAIN)
4944                 return I40E_ERR_CONFIG;
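             /* The firmware installs a default filter for the permanent MAC
              * that ignores the VLAN tag; remove it and re-add the address
              * as a perfect-match filter tracked in the driver's MAC list.
              */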
4945         memset(&def_filter, 0, sizeof(def_filter));
4946         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
4947                                         ETH_ADDR_LEN);
4948         def_filter.vlan_tag = 0;
4949         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4950                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4951         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
4952         if (ret != I40E_SUCCESS) {
4953                 struct i40e_mac_filter *f;
4954                 struct ether_addr *mac;
4955
4956                 PMD_DRV_LOG(DEBUG,
4957                             "Cannot remove the default macvlan filter");
4958                 /* Add the permanent MAC into the MAC list instead */
4959                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4960                 if (f == NULL) {
4961                         PMD_DRV_LOG(ERR, "failed to allocate memory");
4962                         return I40E_ERR_NO_MEMORY;
4963                 }
4964                 mac = &f->mac_info.mac_addr;
4965                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
4966                                 ETH_ADDR_LEN);
4967                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4968                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4969                 vsi->mac_num++;
4970
4971                 return ret;
4972         }
4973         rte_memcpy(&filter.mac_addr,
4974                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
4975         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4976         return i40e_vsi_add_mac(vsi, &filter);
4977 }
4978
4979 /*
4980  * i40e_vsi_get_bw_config - Query VSI BW Information
4981  * @vsi: the VSI to be queried
4982  *
4983  * Returns 0 on success, negative value on failure
4984  */
4985 static enum i40e_status_code
4986 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
4987 {
4988         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
4989         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
4990         struct i40e_hw *hw = &vsi->adapter->hw;
4991         i40e_status ret;
4992         int i;
4993         uint32_t bw_max;
4994
4995         memset(&bw_config, 0, sizeof(bw_config));
4996         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4997         if (ret != I40E_SUCCESS) {
4998                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
4999                             hw->aq.asq_last_status);
5000                 return ret;
5001         }
5002
5003         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5004         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5005                                         &ets_sla_config, NULL);
5006         if (ret != I40E_SUCCESS) {
5007                 PMD_DRV_LOG(ERR,
5008                         "VSI failed to get TC bandwidth configuration %u",
5009                         hw->aq.asq_last_status);
5010                 return ret;
5011         }
5012
5013         /* store and print out BW info */
5014         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5015         vsi->bw_info.bw_max = bw_config.max_bw;
5016         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5017         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
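             /* tc_bw_max holds two 16-bit LE words (TCs 0-3 and TCs 4-7);
              * combine them into one 32-bit value with 4 bits per TC.
              */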
5018         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5019                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5020                      I40E_16_BIT_WIDTH);
5021         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5022                 vsi->bw_info.bw_ets_share_credits[i] =
5023                                 ets_sla_config.share_credits[i];
5024                 vsi->bw_info.bw_ets_credits[i] =
5025                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5026                 /* 4 bits per TC, 4th bit is reserved */
5027                 vsi->bw_info.bw_ets_max[i] =
5028                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5029                                   RTE_LEN2MASK(3, uint8_t));
5030                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5031                             vsi->bw_info.bw_ets_share_credits[i]);
5032                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5033                             vsi->bw_info.bw_ets_credits[i]);
5034                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5035                             vsi->bw_info.bw_ets_max[i]);
5036         }
5037
5038         return I40E_SUCCESS;
5039 }
5040
5041 /* i40e_enable_pf_lb
5042  * @pf: pointer to the pf structure
5043  *
5044  * allow loopback on pf
5045  */
5046 static inline void
5047 i40e_enable_pf_lb(struct i40e_pf *pf)
5048 {
5049         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5050         struct i40e_vsi_context ctxt;
5051         int ret;
5052
5053         /* The loopback flag can only be set through the FW API from v5.0 on */
5054         if (hw->aq.fw_maj_ver < 5) {
5055                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5056                 return;
5057         }
5058
5059         memset(&ctxt, 0, sizeof(ctxt));
5060         ctxt.seid = pf->main_vsi_seid;
5061         ctxt.pf_num = hw->pf_id;
5062         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5063         if (ret) {
5064                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5065                             ret, hw->aq.asq_last_status);
5066                 return;
5067         }
5068         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5069         ctxt.info.valid_sections =
5070                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5071         ctxt.info.switch_id |=
5072                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5073
5074         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5075         if (ret)
5076                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5077                             hw->aq.asq_last_status);
5078 }
5079
5080 /* Setup a VSI */
5081 struct i40e_vsi *
5082 i40e_vsi_setup(struct i40e_pf *pf,
5083                enum i40e_vsi_type type,
5084                struct i40e_vsi *uplink_vsi,
5085                uint16_t user_param)
5086 {
5087         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5088         struct i40e_vsi *vsi;
5089         struct i40e_mac_filter_info filter;
5090         int ret;
5091         struct i40e_vsi_context ctxt;
5092         struct ether_addr broadcast =
5093                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5094
5095         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5096             uplink_vsi == NULL) {
5097                 PMD_DRV_LOG(ERR,
5098                         "VSI setup failed, uplink VSI shouldn't be NULL");
5099                 return NULL;
5100         }
5101
5102         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5103                 PMD_DRV_LOG(ERR,
5104                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5105                 return NULL;
5106         }
5107
5108         /* Two situations:
5109          * 1. type is not MAIN and the uplink VSI is not NULL:
5110          *    if the uplink VSI has no VEB yet, create one first (veb field).
5111          * 2. type is SRIOV and the uplink VSI is NULL:
5112          *    if there is no floating VEB yet, create one (floating_veb field).
5113          */
5114
5115         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5116             uplink_vsi->veb == NULL) {
5117                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5118
5119                 if (uplink_vsi->veb == NULL) {
5120                         PMD_DRV_LOG(ERR, "VEB setup failed");
5121                         return NULL;
5122                 }
5123                 /* Set ALLOW_LOOPBACK on the PF when the VEB is created */
5124                 i40e_enable_pf_lb(pf);
5125         }
5126
5127         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5128             pf->main_vsi->floating_veb == NULL) {
5129                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5130
5131                 if (pf->main_vsi->floating_veb == NULL) {
5132                         PMD_DRV_LOG(ERR, "VEB setup failed");
5133                         return NULL;
5134                 }
5135         }
5136
5137         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5138         if (!vsi) {
5139                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5140                 return NULL;
5141         }
5142         TAILQ_INIT(&vsi->mac_list);
5143         vsi->type = type;
5144         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5145         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5146         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5147         vsi->user_param = user_param;
5148         vsi->vlan_anti_spoof_on = 0;
5149         vsi->vlan_filter_on = 0;
5150         /* Allocate queues */
5151         switch (vsi->type) {
5152         case I40E_VSI_MAIN:
5153                 vsi->nb_qps = pf->lan_nb_qps;
5154                 break;
5155         case I40E_VSI_SRIOV:
5156                 vsi->nb_qps = pf->vf_nb_qps;
5157                 break;
5158         case I40E_VSI_VMDQ2:
5159                 vsi->nb_qps = pf->vmdq_nb_qps;
5160                 break;
5161         case I40E_VSI_FDIR:
5162                 vsi->nb_qps = pf->fdir_nb_qps;
5163                 break;
5164         default:
5165                 goto fail_mem;
5166         }
5167         /*
5168          * The filter status descriptor is reported on RX queue 0,
5169          * while the TX queue used for FDIR filter programming has
5170          * no such constraint and could be any queue.
5171          * To keep things simple, the FDIR VSI uses queue pair 0.
5172          * To guarantee that, its queue allocation must be done
5173          * before this function is called.
5174          */
5175         if (type != I40E_VSI_FDIR) {
5176                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5177                 if (ret < 0) {
5178                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5179                                         vsi->seid, ret);
5180                         goto fail_mem;
5181                 }
5182                 vsi->base_queue = ret;
5183         } else
5184                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5185
5186         /* VFs have MSI-X interrupts in the VF range; don't allocate here */
5187         if (type == I40E_VSI_MAIN) {
5188                 if (pf->support_multi_driver) {
5189                         /* When multi-driver support is enabled, use INT0
5190                          * instead of allocating from the MSI-X pool, which
5191                          * starts at INT1. It is safe to set msix_intr to 0
5192                          * and nb_msix to 1 without i40e_res_pool_alloc.
5193                          */
5194                         vsi->msix_intr = 0;
5195                         vsi->nb_msix = 1;
5196                 } else {
5197                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5198                                                   RTE_MIN(vsi->nb_qps,
5199                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5200                         if (ret < 0) {
5201                                 PMD_DRV_LOG(ERR,
5202                                             "Main VSI %d: MSI-X allocation failed, err %d",
5203                                             vsi->seid, ret);
5204                                 goto fail_queue_alloc;
5205                         }
5206                         vsi->msix_intr = ret;
5207                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5208                                                RTE_MAX_RXTX_INTR_VEC_ID);
5209                 }
5210         } else if (type != I40E_VSI_SRIOV) {
5211                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5212                 if (ret < 0) {
5213                         PMD_DRV_LOG(ERR, "VSI %d: MSI-X allocation failed, err %d", vsi->seid, ret);
5214                         goto fail_queue_alloc;
5215                 }
5216                 vsi->msix_intr = ret;
5217                 vsi->nb_msix = 1;
5218         } else {
5219                 vsi->msix_intr = 0;
5220                 vsi->nb_msix = 0;
5221         }
5222
5223         /* Add VSI */
5224         if (type == I40E_VSI_MAIN) {
5225                 /* For main VSI, no need to add since it's default one */
5226                 vsi->uplink_seid = pf->mac_seid;
5227                 vsi->seid = pf->main_vsi_seid;
5228                 /* Bind queues with specific MSIX interrupt */
5229                 /**
5230                  * At least two interrupts are needed: one for the misc
5231                  * cause, which is enabled from the OS side, and another
5232                  * for binding queues to interrupts, from the device side.
5233                  */
5234
5235                 /* Get default VSI parameters from hardware */
5236                 memset(&ctxt, 0, sizeof(ctxt));
5237                 ctxt.seid = vsi->seid;
5238                 ctxt.pf_num = hw->pf_id;
5239                 ctxt.uplink_seid = vsi->uplink_seid;
5240                 ctxt.vf_num = 0;
5241                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5242                 if (ret != I40E_SUCCESS) {
5243                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5244                         goto fail_msix_alloc;
5245                 }
5246                 rte_memcpy(&vsi->info, &ctxt.info,
5247                         sizeof(struct i40e_aqc_vsi_properties_data));
5248                 vsi->vsi_id = ctxt.vsi_number;
5249                 vsi->info.valid_sections = 0;
5250
5251                 /* Configure TC; enable TC0 only */
5252                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5253                         I40E_SUCCESS) {
5254                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5255                         goto fail_msix_alloc;
5256                 }
5257
5258                 /* TC, queue mapping */
5259                 memset(&ctxt, 0, sizeof(ctxt));
5260                 vsi->info.valid_sections |=
5261                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5262                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5263                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5264                 rte_memcpy(&ctxt.info, &vsi->info,
5265                         sizeof(struct i40e_aqc_vsi_properties_data));
5266                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5267                                                 I40E_DEFAULT_TCMAP);
5268                 if (ret != I40E_SUCCESS) {
5269                         PMD_DRV_LOG(ERR,
5270                                 "Failed to configure TC queue mapping");
5271                         goto fail_msix_alloc;
5272                 }
5273                 ctxt.seid = vsi->seid;
5274                 ctxt.pf_num = hw->pf_id;
5275                 ctxt.uplink_seid = vsi->uplink_seid;
5276                 ctxt.vf_num = 0;
5277
5278                 /* Update VSI parameters */
5279                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5280                 if (ret != I40E_SUCCESS) {
5281                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5282                         goto fail_msix_alloc;
5283                 }
5284
5285                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5286                                                 sizeof(vsi->info.tc_mapping));
5287                 rte_memcpy(&vsi->info.queue_mapping,
5288                                 &ctxt.info.queue_mapping,
5289                         sizeof(vsi->info.queue_mapping));
5290                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5291                 vsi->info.valid_sections = 0;
5292
5293                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5294                                 ETH_ADDR_LEN);
5295
5296                 /**
5297                  * Updating the default filter settings is necessary to
5298                  * prevent reception of tagged packets.
5299                  * Some old firmware configurations load a default macvlan
5300                  * filter which accepts both tagged and untagged packets.
5301                  * The update replaces it with a normal filter if needed.
5302                  * For NVM 4.2.2 or later, the update is no longer needed.
5303                  * Firmware with a correct configuration loads the expected
5304                  * default macvlan filter, which cannot be removed.
5305                  */
5306                 i40e_update_default_filter_setting(vsi);
5307                 i40e_config_qinq(hw, vsi);
5308         } else if (type == I40E_VSI_SRIOV) {
5309                 memset(&ctxt, 0, sizeof(ctxt));
5310                 /**
5311                  * For other VSIs, the uplink_seid equals the uplink VSI's
5312                  * uplink_seid since they share the same VEB.
5313                  */
5314                 if (uplink_vsi == NULL)
5315                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5316                 else
5317                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5318                 ctxt.pf_num = hw->pf_id;
5319                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5320                 ctxt.uplink_seid = vsi->uplink_seid;
5321                 ctxt.connection_type = 0x1;
5322                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5323
5324                 /* Use the VEB configuration if FW >= v5.0 */
5325                 if (hw->aq.fw_maj_ver >= 5) {
5326                         /* Configure switch ID */
5327                         ctxt.info.valid_sections |=
5328                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5329                         ctxt.info.switch_id =
5330                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5331                 }
5332
5333                 /* Configure port/vlan */
5334                 ctxt.info.valid_sections |=
5335                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5336                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5337                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5338                                                 hw->func_caps.enabled_tcmap);
5339                 if (ret != I40E_SUCCESS) {
5340                         PMD_DRV_LOG(ERR,
5341                                 "Failed to configure TC queue mapping");
5342                         goto fail_msix_alloc;
5343                 }
5344
5345                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5346                 ctxt.info.valid_sections |=
5347                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5348                 /**
5349                  * Since the VSI is not created yet, only configure the
5350                  * parameters here; the VSI is added below.
5351                  */
5352
5353                 i40e_config_qinq(hw, vsi);
5354         } else if (type == I40E_VSI_VMDQ2) {
5355                 memset(&ctxt, 0, sizeof(ctxt));
5356                 /*
5357                  * For other VSIs, the uplink_seid equals the uplink VSI's
5358                  * uplink_seid since they share the same VEB.
5359                  */
5360                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5361                 ctxt.pf_num = hw->pf_id;
5362                 ctxt.vf_num = 0;
5363                 ctxt.uplink_seid = vsi->uplink_seid;
5364                 ctxt.connection_type = 0x1;
5365                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5366
5367                 ctxt.info.valid_sections |=
5368                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5369                 /* user_param carries flag to enable loop back */
5370                 if (user_param) {
5371                         ctxt.info.switch_id =
5372                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5373                         ctxt.info.switch_id |=
5374                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5375                 }
5376
5377                 /* Configure port/vlan */
5378                 ctxt.info.valid_sections |=
5379                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5380                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5381                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5382                                                 I40E_DEFAULT_TCMAP);
5383                 if (ret != I40E_SUCCESS) {
5384                         PMD_DRV_LOG(ERR,
5385                                 "Failed to configure TC queue mapping");
5386                         goto fail_msix_alloc;
5387                 }
5388                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5389                 ctxt.info.valid_sections |=
5390                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5391         } else if (type == I40E_VSI_FDIR) {
5392                 memset(&ctxt, 0, sizeof(ctxt));
5393                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5394                 ctxt.pf_num = hw->pf_id;
5395                 ctxt.vf_num = 0;
5396                 ctxt.uplink_seid = vsi->uplink_seid;
5397                 ctxt.connection_type = 0x1;     /* regular data port */
5398                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5399                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5400                                                 I40E_DEFAULT_TCMAP);
5401                 if (ret != I40E_SUCCESS) {
5402                         PMD_DRV_LOG(ERR,
5403                                 "Failed to configure TC queue mapping.");
5404                         goto fail_msix_alloc;
5405                 }
5406                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5407                 ctxt.info.valid_sections |=
5408                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5409         } else {
5410                 PMD_DRV_LOG(ERR, "VSI: this VSI type is not supported yet");
5411                 goto fail_msix_alloc;
5412         }
5413
5414         if (vsi->type != I40E_VSI_MAIN) {
5415                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5416                 if (ret != I40E_SUCCESS) {
5417                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5418                                     hw->aq.asq_last_status);
5419                         goto fail_msix_alloc;
5420                 }
5421                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5422                 vsi->info.valid_sections = 0;
5423                 vsi->seid = ctxt.seid;
5424                 vsi->vsi_id = ctxt.vsi_number;
5425                 vsi->sib_vsi_list.vsi = vsi;
5426                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5427                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5428                                           &vsi->sib_vsi_list, list);
5429                 } else {
5430                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5431                                           &vsi->sib_vsi_list, list);
5432                 }
5433         }
5434
5435         /* MAC/VLAN configuration */
5436         rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5437         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5438
5439         ret = i40e_vsi_add_mac(vsi, &filter);
5440         if (ret != I40E_SUCCESS) {
5441                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5442                 goto fail_msix_alloc;
5443         }
5444
5445         /* Get VSI BW information */
5446         i40e_vsi_get_bw_config(vsi);
5447         return vsi;
5448 fail_msix_alloc:
5449         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5450 fail_queue_alloc:
5451         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5452 fail_mem:
5453         rte_free(vsi);
5454         return NULL;
5455 }
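
/*
 * Illustrative usage sketch (not a driver API): per the logic above, an
 * SRIOV VSI created with a NULL uplink is attached to the floating VEB,
 * while one created with the main VSI as uplink shares the main VSI's
 * VEB; "vf_idx" here is a hypothetical VF index.
 *
 *     vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, NULL, vf_idx);
 *     vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->main_vsi, vf_idx);
 */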
5456
5457 /* Configure vlan filter on or off */
5458 int
5459 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5460 {
5461         int i, num;
5462         struct i40e_mac_filter *f;
5463         void *temp;
5464         struct i40e_mac_filter_info *mac_filter;
5465         enum rte_mac_filter_type desired_filter;
5466         int ret = I40E_SUCCESS;
5467
5468         if (on) {
5469                 /* Filter to match MAC and VLAN */
5470                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5471         } else {
5472                 /* Filter to match only MAC */
5473                 desired_filter = RTE_MAC_PERFECT_MATCH;
5474         }
5475
5476         num = vsi->mac_num;
5477
5478         mac_filter = rte_zmalloc("mac_filter_info_data",
5479                                  num * sizeof(*mac_filter), 0);
5480         if (mac_filter == NULL) {
5481                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5482                 return I40E_ERR_NO_MEMORY;
5483         }
5484
5485         i = 0;
5486
5487         /* Remove all existing MAC filters */
5488         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5489                 mac_filter[i] = f->mac_info;
5490                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5491                 if (ret) {
5492                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5493                                     on ? "enable" : "disable");
5494                         goto DONE;
5495                 }
5496                 i++;
5497         }
5498
5499         /* Re-add the filters with the desired filter type */
5500         for (i = 0; i < num; i++) {
5501                 mac_filter[i].filter_type = desired_filter;
5502                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5503                 if (ret) {
5504                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5505                                     on ? "enable" : "disable");
5506                         goto DONE;
5507                 }
5508         }
5509
5510 DONE:
5511         rte_free(mac_filter);
5512         return ret;
5513 }
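
/*
 * Usage note (illustrative): enabling VLAN filtering rewrites every MAC
 * filter on the VSI from a MAC-only perfect match to a MAC+VLAN perfect
 * match, e.g. i40e_vsi_config_vlan_filter(vsi, TRUE). The
 * remove-then-re-add sequence above is not atomic, so traffic to the
 * affected MAC addresses may be dropped briefly while it runs.
 */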
5514
5515 /* Configure vlan stripping on or off */
5516 int
5517 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5518 {
5519         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5520         struct i40e_vsi_context ctxt;
5521         uint8_t vlan_flags;
5522         int ret = I40E_SUCCESS;
5523
5524         /* Check if it is already on or off */
5525         if (vsi->info.valid_sections &
5526                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5527                 if (on) {
5528                         if ((vsi->info.port_vlan_flags &
5529                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5530                                 return 0; /* already on */
5531                 } else {
5532                         if ((vsi->info.port_vlan_flags &
5533                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5534                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
5535                                 return 0; /* already off */
5536                 }
5537         }
5538
5539         if (on)
5540                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5541         else
5542                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5543         vsi->info.valid_sections =
5544                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5545         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5546         vsi->info.port_vlan_flags |= vlan_flags;
5547         ctxt.seid = vsi->seid;
5548         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5549         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5550         if (ret)
5551                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
5552                             on ? "enable" : "disable");
5553
5554         return ret;
5555 }
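
/*
 * Note on the EMOD encoding used above (as implied by the checks in the
 * function): stripping enabled corresponds to an all-zero EMOD field
 * (I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH), while stripping disabled sets all
 * EMOD bits (I40E_AQ_VSI_PVLAN_EMOD_NOTHING equals the EMOD mask).
 */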
5556
5557 static int
5558 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5559 {
5560         struct rte_eth_dev_data *data = dev->data;
5561         int ret;
5562         int mask = 0;
5563
5564         /* Apply vlan offload setting */
5565         mask = ETH_VLAN_STRIP_MASK |
5566                ETH_VLAN_FILTER_MASK |
5567                ETH_VLAN_EXTEND_MASK;
5568         ret = i40e_vlan_offload_set(dev, mask);
5569         if (ret) {
5570                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
5571                 return ret;
5572         }
5573
5574         /* Apply pvid setting */
5575         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5576                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
5577         if (ret)
5578                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
5579
5580         return ret;
5581 }
5582
5583 static int
5584 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5585 {
5586         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5587
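        /* Per the base-code prototype of i40e_aq_set_port_parameters, the
         * positional arguments below are: bad_frame_vsi = vsi->seid,
         * save_bad_pac = 0 (false), pad_short_pac = 1 (true) and
         * double_vlan = on.
         */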
5588         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5589 }
5590
5591 static int
5592 i40e_update_flow_control(struct i40e_hw *hw)
5593 {
5594 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5595         struct i40e_link_status link_status;
5596         uint32_t rxfc = 0, txfc = 0, reg;
5597         uint8_t an_info;
5598         int ret;
5599
5600         memset(&link_status, 0, sizeof(link_status));
5601         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5602         if (ret != I40E_SUCCESS) {
5603                 PMD_DRV_LOG(ERR, "Failed to get link status information");
5604                 goto write_reg; /* Disable flow control */
5605         }
5606
5607         an_info = hw->phy.link_info.an_info;
5608         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5609                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5610                 ret = I40E_ERR_NOT_READY;
5611                 goto write_reg; /* Disable flow control */
5612         }
5613         /**
5614          * If link auto-negotiation is enabled, flow control needs to
5615          * be configured according to its result.
5616          */
5617         switch (an_info & I40E_LINK_PAUSE_RXTX) {
5618         case I40E_LINK_PAUSE_RXTX:
5619                 rxfc = 1;
5620                 txfc = 1;
5621                 hw->fc.current_mode = I40E_FC_FULL;
5622                 break;
5623         case I40E_AQ_LINK_PAUSE_RX:
5624                 rxfc = 1;
5625                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
5626                 break;
5627         case I40E_AQ_LINK_PAUSE_TX:
5628                 txfc = 1;
5629                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
5630                 break;
5631         default:
5632                 hw->fc.current_mode = I40E_FC_NONE;
5633                 break;
5634         }
5635
5636 write_reg:
5637         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
5638                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
5639         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
5640         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
5641         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
5642         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
5643
5644         return ret;
5645 }
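
/*
 * Note (from the code above): on both failure paths rxfc and txfc stay
 * zero, so jumping to write_reg disables flow control in each direction,
 * matching the "Disable flow control" comments.
 */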
5646
5647 /* PF setup */
5648 static int
5649 i40e_pf_setup(struct i40e_pf *pf)
5650 {
5651         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5652         struct i40e_filter_control_settings settings;
5653         struct i40e_vsi *vsi;
5654         int ret;
5655
5656         /* Clear all stats counters */
5657         pf->offset_loaded = FALSE;
5658         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
5659         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
5660         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
5661         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
5662
5663         ret = i40e_pf_get_switch_config(pf);
5664         if (ret != I40E_SUCCESS) {
5665                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
5666                 return ret;
5667         }
5668         if (pf->flags & I40E_FLAG_FDIR) {
5669                 /* Allocate the queue first so that FDIR can use queue pair 0 */
5670                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
5671                 if (ret != I40E_FDIR_QUEUE_ID) {
5672                         PMD_DRV_LOG(ERR,
5673                                 "queue allocation failed for FDIR: ret=%d",
5674                                 ret);
5675                         pf->flags &= ~I40E_FLAG_FDIR;
5676                 }
5677         }
5678         /* Main VSI setup */
5679         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
5680         if (!vsi) {
5681                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
5682                 return I40E_ERR_NOT_READY;
5683         }
5684         pf->main_vsi = vsi;
5685
5686         /* Configure filter control */
5687         memset(&settings, 0, sizeof(settings));
5688         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
5689                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
5690         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
5691                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
5692         else {
5693                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
5694                         hw->func_caps.rss_table_size);
5695                 return I40E_ERR_PARAM;
5696         }
5697         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
5698                 hw->func_caps.rss_table_size);
5699         pf->hash_lut_size = hw->func_caps.rss_table_size;
5700
5701         /* Enable ethtype and macvlan filters */
5702         settings.enable_ethtype = TRUE;
5703         settings.enable_macvlan = TRUE;
5704         ret = i40e_set_filter_control(hw, &settings);
5705         if (ret)
5706                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
5707                                                                 ret);
5708
5709         /* Update flow control according to the auto negotiation */
5710         i40e_update_flow_control(hw);
5711
5712         return I40E_SUCCESS;
5713 }
5714
5715 int
5716 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5717 {
5718         uint32_t reg;
5719         uint16_t j;
5720
5721         /**
5722          * Set or clear the TX Queue Disable flags,
5723          * as required by hardware.
5724          */
5725         i40e_pre_tx_queue_cfg(hw, q_idx, on);
5726         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
5727
5728         /* Wait until the request is finished */
5729         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5730                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5731                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5732                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5733                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
5734                                                         & 0x1))) {
5735                         break;
5736                 }
5737         }
5738         if (on) {
5739                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
5740                         return I40E_SUCCESS; /* already on, skip next steps */
5741
5742                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
5743                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
5744         } else {
5745                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5746                         return I40E_SUCCESS; /* already off, skip next steps */
5747                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
5748         }
5749         /* Write the register */
5750         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
5751         /* Check the result */
5752         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5753                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5754                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5755                 if (on) {
5756                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5757                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
5758                                 break;
5759                 } else {
5760                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5761                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5762                                 break;
5763                 }
5764         }
5765         /* Check if it is timeout */
5766         if (j >= I40E_CHK_Q_ENA_COUNT) {
5767                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5768                             (on ? "enable" : "disable"), q_idx);
5769                 return I40E_ERR_TIMEOUT;
5770         }
5771
5772         return I40E_SUCCESS;
5773 }
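
/*
 * Minimal sketch (illustrative helper, not a driver API) of the
 * QENA_REQ/QENA_STAT handshake used above: software writes the request
 * bit and hardware acknowledges by mirroring it in the status bit.
 */
static __rte_unused bool
i40e_tx_qena_poll_sketch(struct i40e_hw *hw, uint16_t q_idx, bool on)
{
        uint32_t reg;
        uint16_t j;

        for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
                rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
                reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
                /* Done once the status bit reflects the requested state */
                if (!!(reg & I40E_QTX_ENA_QENA_STAT_MASK) == on)
                        return true;
        }
        return false; /* timed out */
}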
5774
5775 /* Switch on or off the tx queues */
5776 static int
5777 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5778 {
5779         struct rte_eth_dev_data *dev_data = pf->dev_data;
5780         struct i40e_tx_queue *txq;
5781         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5782         uint16_t i;
5783         int ret;
5784
5785         for (i = 0; i < dev_data->nb_tx_queues; i++) {
5786                 txq = dev_data->tx_queues[i];
5787                 /* Skip the queue if it is not configured, or if it is
5788                  * marked for deferred start when starting all queues */
5789                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5790                         continue;
5791                 if (on)
5792                         ret = i40e_dev_tx_queue_start(dev, i);
5793                 else
5794                         ret = i40e_dev_tx_queue_stop(dev, i);
5795                 if (ret != I40E_SUCCESS)
5796                         return ret;
5797         }
5798
5799         return I40E_SUCCESS;
5800 }
5801
5802 int
5803 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5804 {
5805         uint32_t reg;
5806         uint16_t j;
5807
5808         /* Wait until the request is finished */
5809         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5810                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5811                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5812                 if (!(((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5813                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1)))
5814                         break;
5815         }
5816
5817         if (on) {
5818                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5819                         return I40E_SUCCESS; /* Already on, skip next steps */
5820                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5821         } else {
5822                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5823                         return I40E_SUCCESS; /* Already off, skip next steps */
5824                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5825         }
5826
5827         /* Write the register */
5828         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5829         /* Check the result */
5830         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5831                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5832                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5833                 if (on) {
5834                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5835                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
5836                                 break;
5837                 } else {
5838                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5839                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5840                                 break;
5841                 }
5842         }
5843
5844         /* Check if it is timeout */
5845         if (j >= I40E_CHK_Q_ENA_COUNT) {
5846                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
5847                             (on ? "enable" : "disable"), q_idx);
5848                 return I40E_ERR_TIMEOUT;
5849         }
5850
5851         return I40E_SUCCESS;
5852 }
5853 /* Switch on or off the rx queues */
5854 static int
5855 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
5856 {
5857         struct rte_eth_dev_data *dev_data = pf->dev_data;
5858         struct i40e_rx_queue *rxq;
5859         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5860         uint16_t i;
5861         int ret;
5862
5863         for (i = 0; i < dev_data->nb_rx_queues; i++) {
5864                 rxq = dev_data->rx_queues[i];
5865                 /* Skip the queue if it is not configured, or if it is
5866                  * marked for deferred start when starting all queues */
5867                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
5868                         continue;
5869                 if (on)
5870                         ret = i40e_dev_rx_queue_start(dev, i);
5871                 else
5872                         ret = i40e_dev_rx_queue_stop(dev, i);
5873                 if (ret != I40E_SUCCESS)
5874                         return ret;
5875         }
5876
5877         return I40E_SUCCESS;
5878 }
5879
5880 /* Switch on or off all the rx/tx queues */
5881 int
5882 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
5883 {
5884         int ret;
5885
5886         if (on) {
5887                 /* enable rx queues before enabling tx queues */
5888                 ret = i40e_dev_switch_rx_queues(pf, on);
5889                 if (ret) {
5890                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
5891                         return ret;
5892                 }
5893                 ret = i40e_dev_switch_tx_queues(pf, on);
5894         } else {
5895                 /* Stop tx queues before stopping rx queues */
5896                 ret = i40e_dev_switch_tx_queues(pf, on);
5897                 if (ret) {
5898                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
5899                         return ret;
5900                 }
5901                 ret = i40e_dev_switch_rx_queues(pf, on);
5902         }
5903
5904         return ret;
5905 }
5906
5907 /* Initialize VSI for TX */
5908 static int
5909 i40e_dev_tx_init(struct i40e_pf *pf)
5910 {
5911         struct rte_eth_dev_data *data = pf->dev_data;
5912         uint16_t i;
5913         uint32_t ret = I40E_SUCCESS;
5914         struct i40e_tx_queue *txq;
5915
5916         for (i = 0; i < data->nb_tx_queues; i++) {
5917                 txq = data->tx_queues[i];
5918                 if (!txq || !txq->q_set)
5919                         continue;
5920                 ret = i40e_tx_queue_init(txq);
5921                 if (ret != I40E_SUCCESS)
5922                         break;
5923         }
5924         if (ret == I40E_SUCCESS)
5925                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
5926                                      ->eth_dev);
5927
5928         return ret;
5929 }
5930
5931 /* Initialize VSI for RX */
5932 static int
5933 i40e_dev_rx_init(struct i40e_pf *pf)
5934 {
5935         struct rte_eth_dev_data *data = pf->dev_data;
5936         int ret = I40E_SUCCESS;
5937         uint16_t i;
5938         struct i40e_rx_queue *rxq;
5939
5940         i40e_pf_config_mq_rx(pf);
5941         for (i = 0; i < data->nb_rx_queues; i++) {
5942                 rxq = data->rx_queues[i];
5943                 if (!rxq || !rxq->q_set)
5944                         continue;
5945
5946                 ret = i40e_rx_queue_init(rxq);
5947                 if (ret != I40E_SUCCESS) {
5948                         PMD_DRV_LOG(ERR,
5949                                 "Failed to do RX queue initialization");
5950                         break;
5951                 }
5952         }
5953         if (ret == I40E_SUCCESS)
5954                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
5955                                      ->eth_dev);
5956
5957         return ret;
5958 }
5959
5960 static int
5961 i40e_dev_rxtx_init(struct i40e_pf *pf)
5962 {
5963         int err;
5964
5965         err = i40e_dev_tx_init(pf);
5966         if (err) {
5967                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
5968                 return err;
5969         }
5970         err = i40e_dev_rx_init(pf);
5971         if (err) {
5972                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
5973                 return err;
5974         }
5975
5976         return err;
5977 }
5978
5979 static int
5980 i40e_vmdq_setup(struct rte_eth_dev *dev)
5981 {
5982         struct rte_eth_conf *conf = &dev->data->dev_conf;
5983         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5984         int i, err, conf_vsis, j, loop;
5985         struct i40e_vsi *vsi;
5986         struct i40e_vmdq_info *vmdq_info;
5987         struct rte_eth_vmdq_rx_conf *vmdq_conf;
5988         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5989
5990         /*
5991          * Disable interrupts to avoid messages from VFs and to avoid
5992          * race conditions during VSI creation/destruction.
5993          */
5994         i40e_pf_disable_irq0(hw);
5995
5996         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
5997                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
5998                 return -ENOTSUP;
5999         }
6000
6001         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6002         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6003                 PMD_INIT_LOG(ERR, "VMDQ config: %u exceeds max supported: %u",
6004                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6005                         pf->max_nb_vmdq_vsi);
6006                 return -ENOTSUP;
6007         }
6008
6009         if (pf->vmdq != NULL) {
6010                 PMD_INIT_LOG(INFO, "VMDQ already configured");
6011                 return 0;
6012         }
6013
6014         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6015                                 sizeof(*vmdq_info) * conf_vsis, 0);
6016
6017         if (pf->vmdq == NULL) {
6018                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6019                 return -ENOMEM;
6020         }
6021
6022         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6023
6024         /* Create VMDQ VSI */
6025         for (i = 0; i < conf_vsis; i++) {
6026                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6027                                 vmdq_conf->enable_loop_back);
6028                 if (vsi == NULL) {
6029                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6030                         err = -1;
6031                         goto err_vsi_setup;
6032                 }
6033                 vmdq_info = &pf->vmdq[i];
6034                 vmdq_info->pf = pf;
6035                 vmdq_info->vsi = vsi;
6036         }
6037         pf->nb_cfg_vmdq_vsi = conf_vsis;
6038
6039         /* Configure VLANs */
6040         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6041         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6042                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6043                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6044                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6045                                         vmdq_conf->pool_map[i].vlan_id, j);
6046
6047                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6048                                                 vmdq_conf->pool_map[i].vlan_id);
6049                                 if (err) {
6050                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6051                                         err = -1;
6052                                         goto err_vsi_setup;
6053                                 }
6054                         }
6055                 }
6056         }
6057
6058         i40e_pf_enable_irq0(hw);
6059
6060         return 0;
6061
6062 err_vsi_setup:
6063         for (i = 0; i < conf_vsis; i++) {
6064                 if (pf->vmdq[i].vsi == NULL)
6065                         break;
6066                 i40e_vsi_release(pf->vmdq[i].vsi);
6067         }
6068
6069         rte_free(pf->vmdq);
6070         pf->vmdq = NULL;
6071         i40e_pf_enable_irq0(hw);
6072         return err;
6073 }
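
/*
 * Worked example (illustrative) of the pool_map handling above: a map
 * entry with vlan_id 100 and pools == 0x5 adds VLAN 100 to VMDQ VSIs 0
 * and 2, since bits 0 and 2 are set in the pool bitmask.
 */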
6074
6075 static void
6076 i40e_stat_update_32(struct i40e_hw *hw,
6077                    uint32_t reg,
6078                    bool offset_loaded,
6079                    uint64_t *offset,
6080                    uint64_t *stat)
6081 {
6082         uint64_t new_data;
6083
6084         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6085         if (!offset_loaded)
6086                 *offset = new_data;
6087
6088         if (new_data >= *offset)
6089                 *stat = (uint64_t)(new_data - *offset);
6090         else
6091                 *stat = (uint64_t)((new_data +
6092                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6093 }
6094
6095 static void
6096 i40e_stat_update_48(struct i40e_hw *hw,
6097                    uint32_t hireg,
6098                    uint32_t loreg,
6099                    bool offset_loaded,
6100                    uint64_t *offset,
6101                    uint64_t *stat)
6102 {
6103         uint64_t new_data;
6104
6105         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6106         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6107                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6108
6109         if (!offset_loaded)
6110                 *offset = new_data;
6111
6112         if (new_data >= *offset)
6113                 *stat = new_data - *offset;
6114         else
6115                 *stat = (uint64_t)((new_data +
6116                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6117
6118         *stat &= I40E_48_BIT_MASK;
6119 }
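
/*
 * Worked example (illustrative) of the wrap-around handling above: for
 * a 32-bit counter with offset 0xFFFFFFF0, a new raw reading of
 * 0x00000010 means the counter wrapped, so the reported delta is
 * (0x10 + (1ULL << 32)) - 0xFFFFFFF0 = 0x20. The 48-bit variant applies
 * the same logic with a 48-bit width and masks the result to 48 bits.
 */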
6120
6121 /* Disable IRQ0 */
6122 void
6123 i40e_pf_disable_irq0(struct i40e_hw *hw)
6124 {
6125         /* Disable all interrupt types */
6126         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6127                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6128         I40E_WRITE_FLUSH(hw);
6129 }
6130
6131 /* Enable IRQ0 */
6132 void
6133 i40e_pf_enable_irq0(struct i40e_hw *hw)
6134 {
6135         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6136                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6137                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6138                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6139         I40E_WRITE_FLUSH(hw);
6140 }
6141
6142 static void
6143 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6144 {
6145         /* read pending request and disable first */
6146         i40e_pf_disable_irq0(hw);
6147         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6148         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6149                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6150
6151         if (no_queue)
6152                 /* Link no queues with irq0 */
6153                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6154                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6155 }
6156
6157 static void
6158 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6159 {
6160         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6161         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6162         int i;
6163         uint16_t abs_vf_id;
6164         uint32_t index, offset, val;
6165
6166         if (!pf->vfs)
6167                 return;
6168         /**
6169          * Try to find which VF triggered a reset; use the absolute VF id
6170          * for access since the register is a global register.
6171          */
6172         for (i = 0; i < pf->vf_num; i++) {
6173                 abs_vf_id = hw->func_caps.vf_base_id + i;
6174                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6175                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6176                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6177                 /* VFR event occurred */
6178                 if (val & (0x1 << offset)) {
6179                         int ret;
6180
6181                         /* Clear the event first */
6182                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6183                                                         (0x1 << offset));
6184                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6185                         /**
6186                          * Only notify the VF that a reset event occurred;
6187                          * don't trigger another SW reset.
6188                          */
6189                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6190                         if (ret != I40E_SUCCESS)
6191                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6192                 }
6193         }
6194 }
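
/*
 * Worked example (illustrative, assuming I40E_UINT32_BIT_SIZE is 32) of
 * the VFLRSTAT indexing above: an absolute VF id of 40 gives index
 * 40 / 32 = 1 and offset 40 % 32 = 8, so its reset event is reported in
 * bit 8 of I40E_GLGEN_VFLRSTAT(1).
 */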
6195
6196 static void
6197 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6198 {
6199         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6200         int i;
6201
6202         for (i = 0; i < pf->vf_num; i++)
6203                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6204 }
6205
6206 static void
6207 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6208 {
6209         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6210         struct i40e_arq_event_info info;
6211         uint16_t pending, opcode;
6212         int ret;
6213
6214         info.buf_len = I40E_AQ_BUF_SZ;
6215         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6216         if (!info.msg_buf) {
6217                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6218                 return;
6219         }
6220
6221         pending = 1;
6222         while (pending) {
6223                 ret = i40e_clean_arq_element(hw, &info, &pending);
6224
6225                 if (ret != I40E_SUCCESS) {
6226                         PMD_DRV_LOG(INFO,
6227                                 "Failed to read msg from AdminQ, aq_err: %u",
6228                                 hw->aq.asq_last_status);
6229                         break;
6230                 }
6231                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6232
6233                 switch (opcode) {
6234                 case i40e_aqc_opc_send_msg_to_pf:
6235                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6236                         i40e_pf_host_handle_vf_msg(dev,
6237                                         rte_le_to_cpu_16(info.desc.retval),
6238                                         rte_le_to_cpu_32(info.desc.cookie_high),
6239                                         rte_le_to_cpu_32(info.desc.cookie_low),
6240                                         info.msg_buf,
6241                                         info.msg_len);
6242                         break;
6243                 case i40e_aqc_opc_get_link_status:
6244                         ret = i40e_dev_link_update(dev, 0);
6245                         if (!ret)
6246                                 _rte_eth_dev_callback_process(dev,
6247                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6248                         break;
6249                 default:
6250                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6251                                     opcode);
6252                         break;
6253                 }
6254         }
6255         rte_free(info.msg_buf);
6256 }
6257
6258 /**
6259  * Interrupt handler triggered by the NIC for handling
6260  * a specific interrupt.
6261  *
6262  * @param handle
6263  *  Pointer to interrupt handle.
6264  * @param param
6265  *  The address of the parameter (struct rte_eth_dev *) registered before.
6266  *
6267  * @return
6268  *  void
6269  */
6270 static void
6271 i40e_dev_interrupt_handler(void *param)
6272 {
6273         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6274         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6275         uint32_t icr0;
6276
6277         /* Disable interrupt */
6278         i40e_pf_disable_irq0(hw);
6279
6280         /* read out interrupt causes */
6281         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6282
6283         /* No interrupt event indicated */
6284         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6285                 PMD_DRV_LOG(INFO, "No interrupt event");
6286                 goto done;
6287         }
6288         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6289                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6290         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6291                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6292         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6293                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6294         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6295                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6296         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6297                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6298         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6299                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6300         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6301                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6302
6303         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6304                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6305                 i40e_dev_handle_vfr_event(dev);
6306         }
6307         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6308                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6309                 i40e_dev_handle_aq_msg(dev);
6310         }
6311
6312 done:
6313         /* Enable interrupt */
6314         i40e_pf_enable_irq0(hw);
6315         rte_intr_enable(dev->intr_handle);
6316 }
6317
6318 int
6319 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6320                          struct i40e_macvlan_filter *filter,
6321                          int total)
6322 {
6323         int ele_num, ele_buff_size;
6324         int num, actual_num, i;
6325         uint16_t flags;
6326         int ret = I40E_SUCCESS;
6327         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6328         struct i40e_aqc_add_macvlan_element_data *req_list;
6329
6330         if (filter == NULL || total == 0)
6331                 return I40E_ERR_PARAM;
6332         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6333         ele_buff_size = hw->aq.asq_buf_size;
6334
6335         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6336         if (req_list == NULL) {
6337                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6338                 return I40E_ERR_NO_MEMORY;
6339         }
6340
6341         num = 0;
6342         do {
6343                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6344                 memset(req_list, 0, ele_buff_size);
6345
6346                 for (i = 0; i < actual_num; i++) {
6347                         rte_memcpy(req_list[i].mac_addr,
6348                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6349                         req_list[i].vlan_tag =
6350                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6351
6352                         switch (filter[num + i].filter_type) {
6353                         case RTE_MAC_PERFECT_MATCH:
6354                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6355                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6356                                 break;
6357                         case RTE_MACVLAN_PERFECT_MATCH:
6358                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6359                                 break;
6360                         case RTE_MAC_HASH_MATCH:
6361                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6362                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6363                                 break;
6364                         case RTE_MACVLAN_HASH_MATCH:
6365                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6366                                 break;
6367                         default:
6368                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
6369                                 ret = I40E_ERR_PARAM;
6370                                 goto DONE;
6371                         }
6372
6373                         req_list[i].queue_number = 0;
6374
6375                         req_list[i].flags = rte_cpu_to_le_16(flags);
6376                 }
6377
6378                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6379                                                 actual_num, NULL);
6380                 if (ret != I40E_SUCCESS) {
6381                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6382                         goto DONE;
6383                 }
6384                 num += actual_num;
6385         } while (num < total);
6386
6387 DONE:
6388         rte_free(req_list);
6389         return ret;
6390 }
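
/*
 * Sizing note (illustrative; the exact numbers depend on the adminq
 * setup): with a 4 KB ASQ buffer and 16-byte add-macvlan elements,
 * ele_num is 256, so adding 600 filters above takes three AQ commands
 * carrying 256, 256 and 88 elements.
 */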
6391
6392 int
6393 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6394                             struct i40e_macvlan_filter *filter,
6395                             int total)
6396 {
6397         int ele_num, ele_buff_size;
6398         int num, actual_num, i;
6399         uint16_t flags;
6400         int ret = I40E_SUCCESS;
6401         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6402         struct i40e_aqc_remove_macvlan_element_data *req_list;
6403
6404         if (filter == NULL || total == 0)
6405                 return I40E_ERR_PARAM;
6406
6407         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6408         ele_buff_size = hw->aq.asq_buf_size;
6409
6410         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6411         if (req_list == NULL) {
6412                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6413                 return I40E_ERR_NO_MEMORY;
6414         }
6415
6416         num = 0;
6417         do {
6418                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6419                 memset(req_list, 0, ele_buff_size);
6420
6421                 for (i = 0; i < actual_num; i++) {
6422                         rte_memcpy(req_list[i].mac_addr,
6423                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6424                         req_list[i].vlan_tag =
6425                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6426
6427                         switch (filter[num + i].filter_type) {
6428                         case RTE_MAC_PERFECT_MATCH:
6429                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6430                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6431                                 break;
6432                         case RTE_MACVLAN_PERFECT_MATCH:
6433                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6434                                 break;
6435                         case RTE_MAC_HASH_MATCH:
6436                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6437                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6438                                 break;
6439                         case RTE_MACVLAN_HASH_MATCH:
6440                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6441                                 break;
6442                         default:
6443                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6444                                 ret = I40E_ERR_PARAM;
6445                                 goto DONE;
6446                         }
6447                         req_list[i].flags = rte_cpu_to_le_16(flags);
6448                 }
6449
6450                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6451                                                 actual_num, NULL);
6452                 if (ret != I40E_SUCCESS) {
6453                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6454                         goto DONE;
6455                 }
6456                 num += actual_num;
6457         } while (num < total);
6458
6459 DONE:
6460         rte_free(req_list);
6461         return ret;
6462 }
6463
6464 /* Find a specific MAC filter */
6465 static struct i40e_mac_filter *
6466 i40e_find_mac_filter(struct i40e_vsi *vsi,
6467                          struct ether_addr *macaddr)
6468 {
6469         struct i40e_mac_filter *f;
6470
6471         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6472                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6473                         return f;
6474         }
6475
6476         return NULL;
6477 }
6478
6479 static bool
6480 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6481                          uint16_t vlan_id)
6482 {
6483         uint32_t vid_idx, vid_bit;
6484
6485         if (vlan_id > ETH_VLAN_ID_MAX)
6486                 return 0;
6487
6488         vid_idx = I40E_VFTA_IDX(vlan_id);
6489         vid_bit = I40E_VFTA_BIT(vlan_id);
6490
6491         if (vsi->vfta[vid_idx] & vid_bit)
6492                 return 1;
6493         else
6494                 return 0;
6495 }
6496
6497 static void
6498 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6499                        uint16_t vlan_id, bool on)
6500 {
6501         uint32_t vid_idx, vid_bit;
6502
6503         vid_idx = I40E_VFTA_IDX(vlan_id);
6504         vid_bit = I40E_VFTA_BIT(vlan_id);
6505
6506         if (on)
6507                 vsi->vfta[vid_idx] |= vid_bit;
6508         else
6509                 vsi->vfta[vid_idx] &= ~vid_bit;
6510 }
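
/*
 * Worked example (illustrative, assuming the usual I40E_VFTA_IDX/BIT
 * definitions of vlan_id / 32 and 1 << (vlan_id % 32)): vlan_id 100
 * maps to vfta[3], bit 4, since 100 / 32 == 3 and 100 % 32 == 4.
 */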
6511
6512 void
6513 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6514                      uint16_t vlan_id, bool on)
6515 {
6516         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6517         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6518         int ret;
6519
6520         if (vlan_id > ETH_VLAN_ID_MAX)
6521                 return;
6522
6523         i40e_store_vlan_filter(vsi, vlan_id, on);
6524
6525         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6526                 return;
6527
6528         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6529
6530         if (on) {
6531                 ret = i40e_aq_add_vlan(hw, vsi->seid,
6532                                        &vlan_data, 1, NULL);
6533                 if (ret != I40E_SUCCESS)
6534                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6535         } else {
6536                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
6537                                           &vlan_data, 1, NULL);
6538                 if (ret != I40E_SUCCESS)
6539                         PMD_DRV_LOG(ERR,
6540                                     "Failed to remove vlan filter");
6541         }
6542 }
6543
6544 /**
6545  * Find all VLANs configured for a specific MAC address; fill mv_f
6546  * with one entry per VLAN actually found.
6547  */
6548 int
6549 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6550                            struct i40e_macvlan_filter *mv_f,
6551                            int num, struct ether_addr *addr)
6552 {
6553         int i;
6554         uint32_t j, k;
6555
6556         /**
6557          * Avoid i40e_find_vlan_filter() here to keep the loop cost
6558          * down, even though the open-coded VFTA scan looks complex.
6559          */
6560         if (num < vsi->vlan_num)
6561                 return I40E_ERR_PARAM;
6562
6563         i = 0;
6564         for (j = 0; j < I40E_VFTA_SIZE; j++) {
6565                 if (vsi->vfta[j]) {
6566                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6567                                 if (vsi->vfta[j] & (1 << k)) {
6568                                         if (i > num - 1) {
6569                                                 PMD_DRV_LOG(ERR,
6570                                                         "vlan number doesn't match");
6571                                                 return I40E_ERR_PARAM;
6572                                         }
6573                                         rte_memcpy(&mv_f[i].macaddr,
6574                                                         addr, ETH_ADDR_LEN);
6575                                         mv_f[i].vlan_id =
6576                                                 j * I40E_UINT32_BIT_SIZE + k;
6577                                         i++;
6578                                 }
6579                         }
6580                 }
6581         }
6582         return I40E_SUCCESS;
6583 }
6584
6585 static inline int
6586 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
6587                            struct i40e_macvlan_filter *mv_f,
6588                            int num,
6589                            uint16_t vlan)
6590 {
6591         int i = 0;
6592         struct i40e_mac_filter *f;
6593
6594         if (num < vsi->mac_num)
6595                 return I40E_ERR_PARAM;
6596
6597         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6598                 if (i > num - 1) {
6599                         PMD_DRV_LOG(ERR, "buffer number does not match");
6600                         return I40E_ERR_PARAM;
6601                 }
6602                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6603                                 ETH_ADDR_LEN);
6604                 mv_f[i].vlan_id = vlan;
6605                 mv_f[i].filter_type = f->mac_info.filter_type;
6606                 i++;
6607         }
6608
6609         return I40E_SUCCESS;
6610 }
6611
6612 static int
6613 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
6614 {
6615         int i, j, num;
6616         struct i40e_mac_filter *f;
6617         struct i40e_macvlan_filter *mv_f;
6618         int ret = I40E_SUCCESS;
6619
6620         if (vsi == NULL || vsi->mac_num == 0)
6621                 return I40E_ERR_PARAM;
6622
6623         /* Case that no vlan is set */
6624         if (vsi->vlan_num == 0)
6625                 num = vsi->mac_num;
6626         else
6627                 num = vsi->mac_num * vsi->vlan_num;
6628
6629         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
6630         if (mv_f == NULL) {
6631                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6632                 return I40E_ERR_NO_MEMORY;
6633         }
6634
6635         i = 0;
6636         if (vsi->vlan_num == 0) {
6637                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6638                         rte_memcpy(&mv_f[i].macaddr,
6639                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
6640                         mv_f[i].filter_type = f->mac_info.filter_type;
6641                         mv_f[i].vlan_id = 0;
6642                         i++;
6643                 }
6644         } else {
6645                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6646                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
6647                                         vsi->vlan_num, &f->mac_info.mac_addr);
6648                         if (ret != I40E_SUCCESS)
6649                                 goto DONE;
6650                         for (j = i; j < i + vsi->vlan_num; j++)
6651                                 mv_f[j].filter_type = f->mac_info.filter_type;
6652                         i += vsi->vlan_num;
6653                 }
6654         }
6655
6656         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
6657 DONE:
6658         rte_free(mv_f);
6659
6660         return ret;
6661 }
6662
6663 int
6664 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6665 {
6666         struct i40e_macvlan_filter *mv_f;
6667         int mac_num;
6668         int ret = I40E_SUCCESS;
6669
6670         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
6671                 return I40E_ERR_PARAM;
6672
6673         /* If it's already set, just return */
6674         if (i40e_find_vlan_filter(vsi, vlan))
6675                 return I40E_SUCCESS;
6676
6677         mac_num = vsi->mac_num;
6678
6679         if (mac_num == 0) {
6680                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6681                 return I40E_ERR_PARAM;
6682         }
6683
6684         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6685
6686         if (mv_f == NULL) {
6687                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6688                 return I40E_ERR_NO_MEMORY;
6689         }
6690
6691         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6692
6693         if (ret != I40E_SUCCESS)
6694                 goto DONE;
6695
6696         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6697
6698         if (ret != I40E_SUCCESS)
6699                 goto DONE;
6700
6701         i40e_set_vlan_filter(vsi, vlan, 1);
6702
6703         vsi->vlan_num++;
6704         ret = I40E_SUCCESS;
6705 DONE:
6706         rte_free(mv_f);
6707         return ret;
6708 }
6709
6710 int
6711 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6712 {
6713         struct i40e_macvlan_filter *mv_f;
6714         int mac_num;
6715         int ret = I40E_SUCCESS;
6716
6717         /**
6718          * Vlan 0 is the generic filter for untagged packets
6719          * and can't be removed.
6720          */
6721         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
6722                 return I40E_ERR_PARAM;
6723
6724         /* If it's not found, just return */
6725         if (!i40e_find_vlan_filter(vsi, vlan))
6726                 return I40E_ERR_PARAM;
6727
6728         mac_num = vsi->mac_num;
6729
6730         if (mac_num == 0) {
6731                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6732                 return I40E_ERR_PARAM;
6733         }
6734
6735         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6736
6737         if (mv_f == NULL) {
6738                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6739                 return I40E_ERR_NO_MEMORY;
6740         }
6741
6742         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6743
6744         if (ret != I40E_SUCCESS)
6745                 goto DONE;
6746
6747         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6748
6749         if (ret != I40E_SUCCESS)
6750                 goto DONE;
6751
6752         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
6753         if (vsi->vlan_num == 1) {
6754                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6755                 if (ret != I40E_SUCCESS)
6756                         goto DONE;
6757
6758                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6759                 if (ret != I40E_SUCCESS)
6760                         goto DONE;
6761         }
6762
6763         i40e_set_vlan_filter(vsi, vlan, 0);
6764
6765         vsi->vlan_num--;
6766         ret = I40E_SUCCESS;
6767 DONE:
6768         rte_free(mv_f);
6769         return ret;
6770 }
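
/*
 * Editorial sketch (not part of the upstream driver): a typical
 * add/remove cycle for one VLAN on a VSI. VLAN id 100 is purely
 * hypothetical. i40e_vsi_add_vlan() fans the new VLAN out across
 * every configured MAC address; i40e_vsi_delete_vlan() restores the
 * vlan-0 (untagged) filters once the last VLAN is gone.
 */
static void __rte_unused
i40e_example_vlan_cycle(struct i40e_vsi *vsi)
{
        uint16_t vid = 100;

        if (i40e_vsi_add_vlan(vsi, vid) != I40E_SUCCESS)
                return;

        /* ... traffic tagged with VLAN 100 is accepted here ... */

        i40e_vsi_delete_vlan(vsi, vid);
}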
6771
6772 int
6773 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6774 {
6775         struct i40e_mac_filter *f;
6776         struct i40e_macvlan_filter *mv_f;
6777         int i, vlan_num = 0;
6778         int ret = I40E_SUCCESS;
6779
6780         /* If the filter is already configured, just return */
6781         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6782         if (f != NULL)
6783                 return I40E_SUCCESS;
6784         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6785                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6786
6787                 /**
6788                  * If vlan_num is 0, this is the first MAC being added;
6789                  * enable the filter for vlan_id 0 (untagged traffic).
6790                  */
6791                 if (vsi->vlan_num == 0) {
6792                         i40e_set_vlan_filter(vsi, 0, 1);
6793                         vsi->vlan_num = 1;
6794                 }
6795                 vlan_num = vsi->vlan_num;
6796         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6797                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6798                 vlan_num = 1;
6799
6800         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6801         if (mv_f == NULL) {
6802                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6803                 return I40E_ERR_NO_MEMORY;
6804         }
6805
6806         for (i = 0; i < vlan_num; i++) {
6807                 mv_f[i].filter_type = mac_filter->filter_type;
6808                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6809                                 ETH_ADDR_LEN);
6810         }
6811
6812         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6813                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6814                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6815                                         &mac_filter->mac_addr);
6816                 if (ret != I40E_SUCCESS)
6817                         goto DONE;
6818         }
6819
6820         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6821         if (ret != I40E_SUCCESS)
6822                 goto DONE;
6823
6824         /* Add the mac addr into mac list */
6825         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6826         if (f == NULL) {
6827                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6828                 ret = I40E_ERR_NO_MEMORY;
6829                 goto DONE;
6830         }
6831         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6832                         ETH_ADDR_LEN);
6833         f->mac_info.filter_type = mac_filter->filter_type;
6834         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
6835         vsi->mac_num++;
6836
6837         ret = I40E_SUCCESS;
6838 DONE:
6839         rte_free(mv_f);
6840
6841         return ret;
6842 }
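
/*
 * Editorial sketch (not part of the upstream driver): adding a
 * perfect-match MAC/VLAN filter. The locally administered address is
 * a hypothetical example; with RTE_MACVLAN_PERFECT_MATCH the filter
 * is expanded across every VLAN currently known to the VSI.
 */
static int __rte_unused
i40e_example_add_mac(struct i40e_vsi *vsi)
{
        struct i40e_mac_filter_info filter;
        static const struct ether_addr addr = {
                .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01},
        };

        rte_memcpy(&filter.mac_addr, &addr, ETH_ADDR_LEN);
        filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
        return i40e_vsi_add_mac(vsi, &filter);
}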
6843
6844 int
6845 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
6846 {
6847         struct i40e_mac_filter *f;
6848         struct i40e_macvlan_filter *mv_f;
6849         int i, vlan_num;
6850         enum rte_mac_filter_type filter_type;
6851         int ret = I40E_SUCCESS;
6852
6853         /* If the filter can't be found, return an error */
6854         f = i40e_find_mac_filter(vsi, addr);
6855         if (f == NULL)
6856                 return I40E_ERR_PARAM;
6857
6858         vlan_num = vsi->vlan_num;
6859         filter_type = f->mac_info.filter_type;
6860         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6861                 filter_type == RTE_MACVLAN_HASH_MATCH) {
6862                 if (vlan_num == 0) {
6863                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
6864                         return I40E_ERR_PARAM;
6865                 }
6866         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
6867                         filter_type == RTE_MAC_HASH_MATCH)
6868                 vlan_num = 1;
6869
6870         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6871         if (mv_f == NULL) {
6872                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6873                 return I40E_ERR_NO_MEMORY;
6874         }
6875
6876         for (i = 0; i < vlan_num; i++) {
6877                 mv_f[i].filter_type = filter_type;
6878                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6879                                 ETH_ADDR_LEN);
6880         }
6881         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6882                         filter_type == RTE_MACVLAN_HASH_MATCH) {
6883                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
6884                 if (ret != I40E_SUCCESS)
6885                         goto DONE;
6886         }
6887
6888         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
6889         if (ret != I40E_SUCCESS)
6890                 goto DONE;
6891
6892         /* Remove the mac addr from the mac list */
6893         TAILQ_REMOVE(&vsi->mac_list, f, next);
6894         rte_free(f);
6895         vsi->mac_num--;
6896
6897         ret = I40E_SUCCESS;
6898 DONE:
6899         rte_free(mv_f);
6900         return ret;
6901 }
6902
6903 /* Configure hash enable flags for RSS */
6904 uint64_t
6905 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
6906 {
6907         uint64_t hena = 0;
6908         int i;
6909
6910         if (!flags)
6911                 return hena;
6912
6913         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
6914                 if (flags & (1ULL << i))
6915                         hena |= adapter->pctypes_tbl[i];
6916         }
6917
6918         return hena;
6919 }
6920
6921 /* Parse the hash enable flags */
6922 uint64_t
6923 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
6924 {
6925         uint64_t rss_hf = 0;
6926         uint64_t rss_hf = 0;
6927         int i;
6928
6929         if (!flags)
6930                 return rss_hf;
6931         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
6932                 if (flags & adapter->pctypes_tbl[i])
6933                         rss_hf |= (1ULL << i);
6934         }
6935         return rss_hf;
6936 }
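
/*
 * Editorial sketch (not part of the upstream driver): the two helpers
 * above are inverses over the flow types an adapter supports, since
 * flow-type bits map to PCTYPE bits through pctypes_tbl[]. A round
 * trip therefore preserves any flow type with a non-zero table entry.
 */
static void __rte_unused
i40e_example_hena_round_trip(const struct i40e_adapter *adapter)
{
        uint64_t rss_hf = (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) |
                          (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP);
        uint64_t hena = i40e_config_hena(adapter, rss_hf);

        if (i40e_parse_hena(adapter, hena) != rss_hf)
                PMD_DRV_LOG(DEBUG, "flow type not supported by adapter");
}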
6937
6938 /* Disable RSS */
6939 static void
6940 i40e_pf_disable_rss(struct i40e_pf *pf)
6941 {
6942         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6943
6944         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
6945         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
6946         I40E_WRITE_FLUSH(hw);
6947 }
6948
6949 int
6950 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
6951 {
6952         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6953         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6954         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
6955                            I40E_VFQF_HKEY_MAX_INDEX :
6956                            I40E_PFQF_HKEY_MAX_INDEX;
6957         int ret = 0;
6958
6959         if (!key || key_len == 0) {
6960                 PMD_DRV_LOG(DEBUG, "No key to be configured");
6961                 return 0;
6962         } else if (key_len != (key_idx + 1) *
6963                 sizeof(uint32_t)) {
6964                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
6965                 return -EINVAL;
6966         }
6967
6968         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6969                 struct i40e_aqc_get_set_rss_key_data *key_dw =
6970                         (struct i40e_aqc_get_set_rss_key_data *)key;
6971
6972                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
6973                 if (ret)
6974                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
6975         } else {
6976                 uint32_t *hash_key = (uint32_t *)key;
6977                 uint16_t i;
6978
6979                 if (vsi->type == I40E_VSI_SRIOV) {
6980                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
6981                                 I40E_WRITE_REG(
6982                                         hw,
6983                                         I40E_VFQF_HKEY1(i, vsi->user_param),
6984                                         hash_key[i]);
6985
6986                 } else {
6987                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6988                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
6989                                                hash_key[i]);
6990                 }
6991                 I40E_WRITE_FLUSH(hw);
6992         }
6993
6994         return ret;
6995 }
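
/*
 * Editorial sketch (not part of the upstream driver): the PF hash key
 * spans I40E_PFQF_HKEY_MAX_INDEX + 1 32-bit registers (52 bytes with
 * the current register layout); i40e_set_rss_key() rejects any other
 * length with -EINVAL. The 0x6d fill is arbitrary demo data.
 */
static int __rte_unused
i40e_example_set_pf_rss_key(struct i40e_vsi *vsi)
{
        uint8_t key[(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)];

        memset(key, 0x6d, sizeof(key));
        return i40e_set_rss_key(vsi, key, sizeof(key));
}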
6996
6997 static int
6998 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
6999 {
7000         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7001         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7002         uint32_t reg;
7003         int ret;
7004
7005         if (!key || !key_len)
7006                 return -EINVAL;
7007
7008         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7009                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7010                         (struct i40e_aqc_get_set_rss_key_data *)key);
7011                 if (ret) {
7012                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7013                         return ret;
7014                 }
7015         } else {
7016                 uint32_t *key_dw = (uint32_t *)key;
7017                 uint16_t i;
7018
7019                 if (vsi->type == I40E_VSI_SRIOV) {
7020                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7021                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7022                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7023                         }
7024                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7025                                    sizeof(uint32_t);
7026                 } else {
7027                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7028                                 reg = I40E_PFQF_HKEY(i);
7029                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7030                         }
7031                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7032                                    sizeof(uint32_t);
7033                 }
7034         }
7035         return 0;
7036 }
7037
7038 static int
7039 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7040 {
7041         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7042         uint64_t hena;
7043         int ret;
7044
7045         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7046                                rss_conf->rss_key_len);
7047         if (ret)
7048                 return ret;
7049
7050         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7051         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7052         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7053         I40E_WRITE_FLUSH(hw);
7054
7055         return 0;
7056 }
7057
7058 static int
7059 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7060                          struct rte_eth_rss_conf *rss_conf)
7061 {
7062         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7063         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7064         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7065         uint64_t hena;
7066
7067         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7068         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7069
7070         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7071                 if (rss_hf != 0) /* Enable RSS */
7072                         return -EINVAL;
7073                 return 0; /* Nothing to do */
7074         }
7075         /* RSS enabled */
7076         if (rss_hf == 0) /* Disable RSS */
7077                 return -EINVAL;
7078
7079         return i40e_hw_rss_hash_set(pf, rss_conf);
7080 }
7081
7082 static int
7083 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7084                            struct rte_eth_rss_conf *rss_conf)
7085 {
7086         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7087         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7088         uint64_t hena;
7089
7090         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7091                          &rss_conf->rss_key_len);
7092
7093         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7094         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7095         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7096
7097         return 0;
7098 }
7099
7100 static int
7101 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7102 {
7103         switch (filter_type) {
7104         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7105                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7106                 break;
7107         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7108                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7109                 break;
7110         case RTE_TUNNEL_FILTER_IMAC_TENID:
7111                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7112                 break;
7113         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7114                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7115                 break;
7116         case ETH_TUNNEL_FILTER_IMAC:
7117                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7118                 break;
7119         case ETH_TUNNEL_FILTER_OIP:
7120                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7121                 break;
7122         case ETH_TUNNEL_FILTER_IIP:
7123                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7124                 break;
7125         default:
7126                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7127                 return -EINVAL;
7128         }
7129
7130         return 0;
7131 }
7132
7133 /* Convert tunnel filter structure */
7134 static int
7135 i40e_tunnel_filter_convert(
7136         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
7137         struct i40e_tunnel_filter *tunnel_filter)
7138 {
7139         ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
7140                         (struct ether_addr *)&tunnel_filter->input.outer_mac);
7141         ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
7142                         (struct ether_addr *)&tunnel_filter->input.inner_mac);
7143         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7144         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7145              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7146             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7147                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7148         else
7149                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7150         tunnel_filter->input.flags = cld_filter->element.flags;
7151         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7152         tunnel_filter->queue = cld_filter->element.queue_number;
7153         rte_memcpy(tunnel_filter->input.general_fields,
7154                    cld_filter->general_fields,
7155                    sizeof(cld_filter->general_fields));
7156
7157         return 0;
7158 }
7159
7160 /* Check if the tunnel filter already exists */
7161 struct i40e_tunnel_filter *
7162 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7163                              const struct i40e_tunnel_filter_input *input)
7164 {
7165         int ret;
7166
7167         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7168         if (ret < 0)
7169                 return NULL;
7170
7171         return tunnel_rule->hash_map[ret];
7172 }
7173
7174 /* Add a tunnel filter into the SW list */
7175 static int
7176 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7177                              struct i40e_tunnel_filter *tunnel_filter)
7178 {
7179         struct i40e_tunnel_rule *rule = &pf->tunnel;
7180         int ret;
7181
7182         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7183         if (ret < 0) {
7184                 PMD_DRV_LOG(ERR,
7185                             "Failed to insert tunnel filter into hash table %d!",
7186                             ret);
7187                 return ret;
7188         }
7189         rule->hash_map[ret] = tunnel_filter;
7190
7191         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7192
7193         return 0;
7194 }
7195
7196 /* Delete a tunnel filter from the SW list */
7197 int
7198 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7199                           struct i40e_tunnel_filter_input *input)
7200 {
7201         struct i40e_tunnel_rule *rule = &pf->tunnel;
7202         struct i40e_tunnel_filter *tunnel_filter;
7203         int ret;
7204
7205         ret = rte_hash_del_key(rule->hash_table, input);
7206         if (ret < 0) {
7207                 PMD_DRV_LOG(ERR,
7208                             "Failed to delete tunnel filter from hash table %d!",
7209                             ret);
7210                 return ret;
7211         }
7212         tunnel_filter = rule->hash_map[ret];
7213         rule->hash_map[ret] = NULL;
7214
7215         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7216         rte_free(tunnel_filter);
7217
7218         return 0;
7219 }
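
/*
 * Editorial sketch (not part of the upstream driver): the three SW
 * list helpers above rely on one invariant of rte_hash: add and
 * lookup return the same slot index for a given key, so hash_map[]
 * always resolves an input back to the filter that insert stored.
 */
static bool __rte_unused
i40e_example_tunnel_rule_exists(struct i40e_pf *pf,
                                const struct i40e_tunnel_filter_input *input)
{
        return i40e_sw_tunnel_filter_lookup(&pf->tunnel, input) != NULL;
}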
7220
7221 int
7222 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7223                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
7224                         uint8_t add)
7225 {
7226         uint16_t ip_type;
7227         uint32_t ipv4_addr, ipv4_addr_le;
7228         uint8_t i, tun_type = 0;
7229         /* internal variable to convert ipv6 byte order */
7230         uint32_t convert_ipv6[4];
7231         int val, ret = 0;
7232         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7233         struct i40e_vsi *vsi = pf->main_vsi;
7234         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7235         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7236         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7237         struct i40e_tunnel_filter *tunnel, *node;
7238         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7239
7240         cld_filter = rte_zmalloc("tunnel_filter",
7241                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7242         0);
7243
7244         if (cld_filter == NULL) {
7245                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7246                 return -ENOMEM;
7247         }
7248         pfilter = cld_filter;
7249
7250         ether_addr_copy(&tunnel_filter->outer_mac,
7251                         (struct ether_addr *)&pfilter->element.outer_mac);
7252         ether_addr_copy(&tunnel_filter->inner_mac,
7253                         (struct ether_addr *)&pfilter->element.inner_mac);
7254
7255         pfilter->element.inner_vlan =
7256                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7257         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
7258                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7259                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7260                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7261                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7262                                 &ipv4_addr_le,
7263                                 sizeof(pfilter->element.ipaddr.v4.data));
7264         } else {
7265                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7266                 for (i = 0; i < 4; i++) {
7267                         convert_ipv6[i] =
7268                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
7269                 }
7270                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7271                            &convert_ipv6,
7272                            sizeof(pfilter->element.ipaddr.v6.data));
7273         }
7274
7275         /* check tunneled type */
7276         switch (tunnel_filter->tunnel_type) {
7277         case RTE_TUNNEL_TYPE_VXLAN:
7278                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7279                 break;
7280         case RTE_TUNNEL_TYPE_NVGRE:
7281                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7282                 break;
7283         case RTE_TUNNEL_TYPE_IP_IN_GRE:
7284                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7285                 break;
7286         default:
7287                 /* Other tunnel types are not supported. */
7288                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7289                 rte_free(cld_filter);
7290                 return -EINVAL;
7291         }
7292
7293         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7294                                        &pfilter->element.flags);
7295         if (val < 0) {
7296                 rte_free(cld_filter);
7297                 return -EINVAL;
7298         }
7299
7300         pfilter->element.flags |= rte_cpu_to_le_16(
7301                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7302                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7303         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7304         pfilter->element.queue_number =
7305                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7306
7307         /* Check if the filter is already in the SW list */
7308         memset(&check_filter, 0, sizeof(check_filter));
7309         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7310         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7311         if (add && node) {
7312                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7313                 rte_free(cld_filter);
7314                 return -EINVAL;
7315         }
7316
7317         if (!add && !node) {
7318                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7319                 rte_free(cld_filter);
7320                 return -EINVAL;
7321         }
7322
7323         if (add) {
7324                 ret = i40e_aq_add_cloud_filters(hw,
7325                                         vsi->seid, &cld_filter->element, 1);
7326                 if (ret < 0) {
7327                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7328                         rte_free(cld_filter);
7329                         return -ENOTSUP;
7330                 }
7331                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7332                 if (tunnel == NULL) {
7333                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7334                         rte_free(cld_filter);
7335                         return -ENOMEM;
7336                 }
7337
7338                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7339                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7340                 if (ret < 0)
7341                         rte_free(tunnel);
7342         } else {
7343                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7344                                                    &cld_filter->element, 1);
7345                 if (ret < 0) {
7346                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7347                         rte_free(cld_filter);
7348                         return -ENOTSUP;
7349                 }
7350                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7351         }
7352
7353         rte_free(cld_filter);
7354         return ret;
7355 }
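
/*
 * Editorial sketch (not part of the upstream driver): a minimal
 * caller of i40e_dev_tunnel_filter_set(), steering one VXLAN tenant
 * to an RX queue by inner MAC. VNI 100 and queue 0 are hypothetical;
 * inner_mac is left zeroed for brevity, a real filter would set it.
 */
static int __rte_unused
i40e_example_add_vxlan_filter(struct i40e_pf *pf)
{
        struct rte_eth_tunnel_filter_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
        conf.filter_type = ETH_TUNNEL_FILTER_IMAC;
        conf.ip_type = RTE_TUNNEL_IPTYPE_IPV4;
        conf.tenant_id = 100;
        conf.queue_id = 0;
        return i40e_dev_tunnel_filter_set(pf, &conf, 1);
}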
7356
7357 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7358 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7359 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7360 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7361 #define I40E_TR_GRE_KEY_MASK                    0x400
7362 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7363 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7364
7365 static enum i40e_status_code
7366 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7367 {
7368         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7369         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7370         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7371         enum i40e_status_code status = I40E_SUCCESS;
7372
7373         if (pf->support_multi_driver) {
7374                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7375                 return I40E_NOT_SUPPORTED;
7376         }
7377
7378         memset(&filter_replace, 0,
7379                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7380         memset(&filter_replace_buf, 0,
7381                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7382
7383         /* create L1 filter */
7384         filter_replace.old_filter_type =
7385                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7386         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7387         filter_replace.tr_bit = 0;
7388
7389         /* Prepare the buffer, 3 entries */
7390         filter_replace_buf.data[0] =
7391                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7392         filter_replace_buf.data[0] |=
7393                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7394         filter_replace_buf.data[2] = 0xFF;
7395         filter_replace_buf.data[3] = 0xFF;
7396         filter_replace_buf.data[4] =
7397                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7398         filter_replace_buf.data[4] |=
7399                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7400         filter_replace_buf.data[7] = 0xF0;
7401         filter_replace_buf.data[8]
7402                 = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7403         filter_replace_buf.data[8] |=
7404                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7405         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7406                 I40E_TR_GENEVE_KEY_MASK |
7407                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7408         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7409                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7410                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7411
7412         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7413                                                &filter_replace_buf);
7414         if (!status) {
7415                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7416                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7417                             "cloud l1 type is changed from 0x%x to 0x%x",
7418                             filter_replace.old_filter_type,
7419                             filter_replace.new_filter_type);
7420         }
7421         return status;
7422 }
7423
7424 static enum i40e_status_code
7425 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7426 {
7427         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7428         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7429         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7430         enum i40e_status_code status = I40E_SUCCESS;
7431
7432         if (pf->support_multi_driver) {
7433                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7434                 return I40E_NOT_SUPPORTED;
7435         }
7436
7437         /* For MPLSoUDP */
7438         memset(&filter_replace, 0,
7439                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7440         memset(&filter_replace_buf, 0,
7441                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7442         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7443                 I40E_AQC_MIRROR_CLOUD_FILTER;
7444         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7445         filter_replace.new_filter_type =
7446                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7447         /* Prepare the buffer, 2 entries */
7448         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7449         filter_replace_buf.data[0] |=
7450                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7451         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7452         filter_replace_buf.data[4] |=
7453                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7454         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7455                                                &filter_replace_buf);
7456         if (status < 0)
7457                 return status;
7458         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7459                     "cloud filter type is changed from 0x%x to 0x%x",
7460                     filter_replace.old_filter_type,
7461                     filter_replace.new_filter_type);
7462
7463         /* For MPLSoGRE */
7464         memset(&filter_replace, 0,
7465                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7466         memset(&filter_replace_buf, 0,
7467                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7468
7469         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7470                 I40E_AQC_MIRROR_CLOUD_FILTER;
7471         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7472         filter_replace.new_filter_type =
7473                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7474         /* Prepare the buffer, 2 entries */
7475         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7476         filter_replace_buf.data[0] |=
7477                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7478         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7479         filter_replace_buf.data[4] |=
7480                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7481
7482         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7483                                                &filter_replace_buf);
7484         if (!status) {
7485                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7486                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7487                             "cloud filter type is changed from 0x%x to 0x%x",
7488                             filter_replace.old_filter_type,
7489                             filter_replace.new_filter_type);
7490         }
7491         return status;
7492 }
7493
7494 static enum i40e_status_code
7495 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7496 {
7497         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7498         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7499         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7500         enum i40e_status_code status = I40E_SUCCESS;
7501
7502         if (pf->support_multi_driver) {
7503                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7504                 return I40E_NOT_SUPPORTED;
7505         }
7506
7507         /* For GTP-C */
7508         memset(&filter_replace, 0,
7509                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7510         memset(&filter_replace_buf, 0,
7511                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7512         /* create L1 filter */
7513         filter_replace.old_filter_type =
7514                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7515         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
7516         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
7517                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7518         /* Prepare the buffer, 2 entries */
7519         filter_replace_buf.data[0] =
7520                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7521         filter_replace_buf.data[0] |=
7522                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7523         filter_replace_buf.data[2] = 0xFF;
7524         filter_replace_buf.data[3] = 0xFF;
7525         filter_replace_buf.data[4] =
7526                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7527         filter_replace_buf.data[4] |=
7528                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7529         filter_replace_buf.data[6] = 0xFF;
7530         filter_replace_buf.data[7] = 0xFF;
7531         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7532                                                &filter_replace_buf);
7533         if (status < 0)
7534                 return status;
7535         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7536                     "cloud l1 type is changed from 0x%x to 0x%x",
7537                     filter_replace.old_filter_type,
7538                     filter_replace.new_filter_type);
7539
7540         /* for GTP-U */
7541         memset(&filter_replace, 0,
7542                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7543         memset(&filter_replace_buf, 0,
7544                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7545         /* create L1 filter */
7546         filter_replace.old_filter_type =
7547                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
7548         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
7549         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
7550                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7551         /* Prepare the buffer, 2 entries */
7552         filter_replace_buf.data[0] =
7553                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7554         filter_replace_buf.data[0] |=
7555                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7556         filter_replace_buf.data[2] = 0xFF;
7557         filter_replace_buf.data[3] = 0xFF;
7558         filter_replace_buf.data[4] =
7559                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7560         filter_replace_buf.data[4] |=
7561                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7562         filter_replace_buf.data[6] = 0xFF;
7563         filter_replace_buf.data[7] = 0xFF;
7564
7565         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7566                                                &filter_replace_buf);
7567         if (!status) {
7568                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7569                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7570                             "cloud l1 type is changed from 0x%x to 0x%x",
7571                             filter_replace.old_filter_type,
7572                             filter_replace.new_filter_type);
7573         }
7574         return status;
7575 }
7576
7577 static enum i40e_status_code
7578 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
7579 {
7580         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7581         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7582         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7583         enum i40e_status_code status = I40E_SUCCESS;
7584
7585         if (pf->support_multi_driver) {
7586                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7587                 return I40E_NOT_SUPPORTED;
7588         }
7589
7590         /* for GTP-C */
7591         memset(&filter_replace, 0,
7592                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7593         memset(&filter_replace_buf, 0,
7594                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7595         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7596         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7597         filter_replace.new_filter_type =
7598                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7599         /* Prepare the buffer, 2 entries */
7600         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
7601         filter_replace_buf.data[0] |=
7602                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7603         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7604         filter_replace_buf.data[4] |=
7605                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7606         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7607                                                &filter_replace_buf);
7608         if (status < 0)
7609                 return status;
7610         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7611                     "cloud filter type is changed from 0x%x to 0x%x",
7612                     filter_replace.old_filter_type,
7613                     filter_replace.new_filter_type);
7614
7615         /* for GTP-U */
7616         memset(&filter_replace, 0,
7617                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7618         memset(&filter_replace_buf, 0,
7619                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7620         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7621         filter_replace.old_filter_type =
7622                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7623         filter_replace.new_filter_type =
7624                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7625         /* Prepare the buffer, 2 entries */
7626         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
7627         filter_replace_buf.data[0] |=
7628                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7629         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7630         filter_replace_buf.data[4] |=
7631                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7632
7633         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7634                                                &filter_replace_buf);
7635         if (!status) {
7636                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7637                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7638                             "cloud filter type is changed from 0x%x to 0x%x",
7639                             filter_replace.old_filter_type,
7640                             filter_replace.new_filter_type);
7641         }
7642         return status;
7643 }
7644
7645 int
7646 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
7647                       struct i40e_tunnel_filter_conf *tunnel_filter,
7648                       uint8_t add)
7649 {
7650         uint16_t ip_type;
7651         uint32_t ipv4_addr, ipv4_addr_le;
7652         uint8_t i, tun_type = 0;
7653         /* internal variable to convert ipv6 byte order */
7654         uint32_t convert_ipv6[4];
7655         int val, ret = 0;
7656         struct i40e_pf_vf *vf = NULL;
7657         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7658         struct i40e_vsi *vsi;
7659         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7660         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7661         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7662         struct i40e_tunnel_filter *tunnel, *node;
7663         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7664         uint32_t teid_le;
7665         bool big_buffer = 0;
7666
7667         cld_filter = rte_zmalloc("tunnel_filter",
7668                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7669                          0);
7670
7671         if (cld_filter == NULL) {
7672                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7673                 return -ENOMEM;
7674         }
7675         pfilter = cld_filter;
7676
7677         ether_addr_copy(&tunnel_filter->outer_mac,
7678                         (struct ether_addr *)&pfilter->element.outer_mac);
7679         ether_addr_copy(&tunnel_filter->inner_mac,
7680                         (struct ether_addr *)&pfilter->element.inner_mac);
7681
7682         pfilter->element.inner_vlan =
7683                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7684         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
7685                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7686                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7687                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7688                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7689                                 &ipv4_addr_le,
7690                                 sizeof(pfilter->element.ipaddr.v4.data));
7691         } else {
7692                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7693                 for (i = 0; i < 4; i++) {
7694                         convert_ipv6[i] =
7695                         rte_cpu_to_le_32(rte_be_to_cpu_32(
7696                                          tunnel_filter->ip_addr.ipv6_addr[i]));
7697                 }
7698                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7699                            &convert_ipv6,
7700                            sizeof(pfilter->element.ipaddr.v6.data));
7701         }
7702
7703         /* check tunneled type */
7704         switch (tunnel_filter->tunnel_type) {
7705         case I40E_TUNNEL_TYPE_VXLAN:
7706                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7707                 break;
7708         case I40E_TUNNEL_TYPE_NVGRE:
7709                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7710                 break;
7711         case I40E_TUNNEL_TYPE_IP_IN_GRE:
7712                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7713                 break;
7714         case I40E_TUNNEL_TYPE_MPLSoUDP:
7715                 if (!pf->mpls_replace_flag) {
7716                         i40e_replace_mpls_l1_filter(pf);
7717                         i40e_replace_mpls_cloud_filter(pf);
7718                         pf->mpls_replace_flag = 1;
7719                 }
7720                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7721                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7722                         teid_le >> 4;
7723                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7724                         (teid_le & 0xF) << 12;
7725                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7726                         0x40;
7727                 big_buffer = 1;
7728                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
7729                 break;
7730         case I40E_TUNNEL_TYPE_MPLSoGRE:
7731                 if (!pf->mpls_replace_flag) {
7732                         i40e_replace_mpls_l1_filter(pf);
7733                         i40e_replace_mpls_cloud_filter(pf);
7734                         pf->mpls_replace_flag = 1;
7735                 }
7736                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7737                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7738                         teid_le >> 4;
7739                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7740                         (teid_le & 0xF) << 12;
7741                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7742                         0x0;
7743                 big_buffer = 1;
7744                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
7745                 break;
7746         case I40E_TUNNEL_TYPE_GTPC:
7747                 if (!pf->gtp_replace_flag) {
7748                         i40e_replace_gtp_l1_filter(pf);
7749                         i40e_replace_gtp_cloud_filter(pf);
7750                         pf->gtp_replace_flag = 1;
7751                 }
7752                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7753                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
7754                         (teid_le >> 16) & 0xFFFF;
7755                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
7756                         teid_le & 0xFFFF;
7757                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
7758                         0x0;
7759                 big_buffer = 1;
7760                 break;
7761         case I40E_TUNNEL_TYPE_GTPU:
7762                 if (!pf->gtp_replace_flag) {
7763                         i40e_replace_gtp_l1_filter(pf);
7764                         i40e_replace_gtp_cloud_filter(pf);
7765                         pf->gtp_replace_flag = 1;
7766                 }
7767                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7768                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
7769                         (teid_le >> 16) & 0xFFFF;
7770                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
7771                         teid_le & 0xFFFF;
7772                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
7773                         0x0;
7774                 big_buffer = 1;
7775                 break;
7776         case I40E_TUNNEL_TYPE_QINQ:
7777                 if (!pf->qinq_replace_flag) {
7778                         ret = i40e_cloud_filter_qinq_create(pf);
7779                         if (ret < 0)
7780                                 PMD_DRV_LOG(DEBUG,
7781                                             "QinQ tunnel filter already created.");
7782                         pf->qinq_replace_flag = 1;
7783                 }
7784                 /* Carry the outer and inner VLAN tag values in the
7785                  * general fields. The big-buffer command variant must
7786                  * be used for this; see the handling in
7787                  * i40e_aq_add_cloud_filters().
7788                  */
7789                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
7790                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
7791                 big_buffer = 1;
7792                 break;
7793         default:
7794                 /* Other tunnel types are not supported. */
7795                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7796                 rte_free(cld_filter);
7797                 return -EINVAL;
7798         }
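
        /*
         * Editorial note: worked example of the MPLSoUDP/MPLSoGRE
         * label packing above. With teid_le = 0x00012345, WORD0 holds
         * teid_le >> 4 = 0x1234 and WORD1 holds (teid_le & 0xF) << 12
         * = 0x5000, i.e. the 20-bit MPLS label is split 16/4 across
         * the two flexible-payload words.
         */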
7799
7800         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
7801                 pfilter->element.flags =
7802                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7803         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
7804                 pfilter->element.flags =
7805                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7806         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
7807                 pfilter->element.flags =
7808                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7809         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
7810                 pfilter->element.flags =
7811                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7812         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
7813                 pfilter->element.flags |=
7814                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
7815         else {
7816                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7817                                                 &pfilter->element.flags);
7818                 if (val < 0) {
7819                         rte_free(cld_filter);
7820                         return -EINVAL;
7821                 }
7822         }
7823
7824         pfilter->element.flags |= rte_cpu_to_le_16(
7825                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7826                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7827         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7828         pfilter->element.queue_number =
7829                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7830
7831         if (!tunnel_filter->is_to_vf) {
7832                 vsi = pf->main_vsi;
7833         } else {
7834                 if (tunnel_filter->vf_id >= pf->vf_num) {
7835                         PMD_DRV_LOG(ERR, "Invalid argument.");
7836                         rte_free(cld_filter);
7837                         return -EINVAL;
7838                 }
7839                 vf = &pf->vfs[tunnel_filter->vf_id];
7840                 vsi = vf->vsi;
7841         }
7842
7843         /* Check if the filter exists in the SW list */
7844         memset(&check_filter, 0, sizeof(check_filter));
7845         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7846         check_filter.is_to_vf = tunnel_filter->is_to_vf;
7847         check_filter.vf_id = tunnel_filter->vf_id;
7848         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7849         if (add && node) {
7850                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7851                 rte_free(cld_filter);
7852                 return -EINVAL;
7853         }
7854
7855         if (!add && !node) {
7856                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7857                 rte_free(cld_filter);
7858                 return -EINVAL;
7859         }
7860
7861         if (add) {
7862                 if (big_buffer)
7863                         ret = i40e_aq_add_cloud_filters_big_buffer(hw,
7864                                                    vsi->seid, cld_filter, 1);
7865                 else
7866                         ret = i40e_aq_add_cloud_filters(hw,
7867                                         vsi->seid, &cld_filter->element, 1);
7868                 if (ret < 0) {
7869                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7870                         rte_free(cld_filter);
7871                         return -ENOTSUP;
7872                 }
7873                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7874                 if (tunnel == NULL) {
7875                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7876                         rte_free(cld_filter);
7877                         return -ENOMEM;
7878                 }
7879
7880                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7881                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7882                 if (ret < 0)
7883                         rte_free(tunnel);
7884         } else {
7885                 if (big_buffer)
7886                         ret = i40e_aq_remove_cloud_filters_big_buffer(
7887                                 hw, vsi->seid, cld_filter, 1);
7888                 else
7889                         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7890                                                    &cld_filter->element, 1);
7891                 if (ret < 0) {
7892                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7893                         rte_free(cld_filter);
7894                         return -ENOTSUP;
7895                 }
7896                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7897         }
7898
7899         rte_free(cld_filter);
7900         return ret;
7901 }
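/*
 * Illustrative sketch (not driver code): how a caller such as the rte_flow
 * layer might fill the internal i40e_tunnel_filter_conf consumed above to
 * steer a QinQ tunnel to a PF queue. The field names are taken from the
 * structure used in this function; the concrete values and the surrounding
 * setup are assumptions.
 *
 *     struct i40e_tunnel_filter_conf conf;
 *
 *     memset(&conf, 0, sizeof(conf));
 *     conf.tunnel_type = I40E_TUNNEL_TYPE_QINQ;
 *     conf.outer_vlan = 100;
 *     conf.inner_vlan = 200;
 *     conf.queue_id = 1;
 *     conf.is_to_vf = 0;          // deliver to the PF, not a VF VSI
 *
 * Passing this structure with add = 1 programs the cloud filter through
 * the big-buffer admin queue path shown above.
 */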
7902
7903 static int
7904 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
7905 {
7906         uint8_t i;
7907
7908         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7909                 if (pf->vxlan_ports[i] == port)
7910                         return i;
7911         }
7912
7913         return -1;
7914 }
7915
7916 static int
7917 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
7918 {
7919         int  idx, ret;
7920         uint8_t filter_idx;
7921         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7922
7923         idx = i40e_get_vxlan_port_idx(pf, port);
7924
7925         /* Check if port already exists */
7926         if (idx >= 0) {
7927                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
7928                 return -EINVAL;
7929         }
7930
7931         /* Now check if there is space to add the new port */
7932         idx = i40e_get_vxlan_port_idx(pf, 0);
7933         if (idx < 0) {
7934                 PMD_DRV_LOG(ERR,
7935                         "Maximum number of UDP ports reached, not adding port %d",
7936                         port);
7937                 return -ENOSPC;
7938         }
7939
7940         ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
7941                                         &filter_idx, NULL);
7942         if (ret < 0) {
7943                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
7944                 return -1;
7945         }
7946
7947         PMD_DRV_LOG(INFO, "Added UDP tunnel port %d with AQ command, filter index %d",
7948                          port, filter_idx);
7949
7950         /* New port: add it and mark its index in the bitmap */
7951         pf->vxlan_ports[idx] = port;
7952         pf->vxlan_bitmap |= (1 << idx);
7953
7954         if (!(pf->flags & I40E_FLAG_VXLAN))
7955                 pf->flags |= I40E_FLAG_VXLAN;
7956
7957         return 0;
7958 }
7959
7960 static int
7961 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
7962 {
7963         int idx;
7964         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7965
7966         if (!(pf->flags & I40E_FLAG_VXLAN)) {
7967                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
7968                 return -EINVAL;
7969         }
7970
7971         idx = i40e_get_vxlan_port_idx(pf, port);
7972
7973         if (idx < 0) {
7974                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
7975                 return -EINVAL;
7976         }
7977
7978         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
7979                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
7980                 return -1;
7981         }
7982
7983         PMD_DRV_LOG(INFO, "Deleted UDP tunnel port %d with AQ command, filter index %d",
7984                         port, idx);
7985
7986         pf->vxlan_ports[idx] = 0;
7987         pf->vxlan_bitmap &= ~(1 << idx);
7988
7989         if (!pf->vxlan_bitmap)
7990                 pf->flags &= ~I40E_FLAG_VXLAN;
7991
7992         return 0;
7993 }
7994
7995 /* Add UDP tunneling port */
7996 static int
7997 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
7998                              struct rte_eth_udp_tunnel *udp_tunnel)
7999 {
8000         int ret = 0;
8001         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8002
8003         if (udp_tunnel == NULL)
8004                 return -EINVAL;
8005
8006         switch (udp_tunnel->prot_type) {
8007         case RTE_TUNNEL_TYPE_VXLAN:
8008                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
8009                 break;
8010
8011         case RTE_TUNNEL_TYPE_GENEVE:
8012         case RTE_TUNNEL_TYPE_TEREDO:
8013                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8014                 ret = -1;
8015                 break;
8016
8017         default:
8018                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8019                 ret = -1;
8020                 break;
8021         }
8022
8023         return ret;
8024 }
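/*
 * Illustrative usage (application side, not part of this driver): the
 * handler above is normally reached through the generic ethdev API. A
 * minimal sketch, assuming a valid port_id:
 *
 *     struct rte_eth_udp_tunnel tunnel = {
 *             .udp_port = 4789,                    // IANA VXLAN port
 *             .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *     };
 *
 *     ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */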
8025
8026 /* Remove UDP tunneling port */
8027 static int
8028 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8029                              struct rte_eth_udp_tunnel *udp_tunnel)
8030 {
8031         int ret = 0;
8032         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8033
8034         if (udp_tunnel == NULL)
8035                 return -EINVAL;
8036
8037         switch (udp_tunnel->prot_type) {
8038         case RTE_TUNNEL_TYPE_VXLAN:
8039                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8040                 break;
8041         case RTE_TUNNEL_TYPE_GENEVE:
8042         case RTE_TUNNEL_TYPE_TEREDO:
8043                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8044                 ret = -1;
8045                 break;
8046         default:
8047                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8048                 ret = -1;
8049                 break;
8050         }
8051
8052         return ret;
8053 }
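/*
 * The matching teardown, again via the generic ethdev API (illustrative
 * sketch with the same assumed port_id and tunnel structure as above):
 *
 *     ret = rte_eth_dev_udp_tunnel_port_del(port_id, &tunnel);
 */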
8054
8055 /* Calculate the maximum number of contiguous PF queues that are configured */
8056 static int
8057 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8058 {
8059         struct rte_eth_dev_data *data = pf->dev_data;
8060         int i, num;
8061         struct i40e_rx_queue *rxq;
8062
8063         num = 0;
8064         for (i = 0; i < pf->lan_nb_qps; i++) {
8065                 rxq = data->rx_queues[i];
8066                 if (rxq && rxq->q_set)
8067                         num++;
8068                 else
8069                         break;
8070         }
8071
8072         return num;
8073 }
8074
8075 /* Configure RSS */
8076 static int
8077 i40e_pf_config_rss(struct i40e_pf *pf)
8078 {
8079         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8080         struct rte_eth_rss_conf rss_conf;
8081         uint32_t i, lut = 0;
8082         uint16_t j, num;
8083
8084         /*
8085          * If both VMDQ and RSS are enabled, not all PF queues are configured.
8086          * The actual number of configured PF queues must be calculated.
8087          */
8088         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8089                 num = i40e_pf_calc_configured_queues_num(pf);
8090         else
8091                 num = pf->dev_data->nb_rx_queues;
8092
8093         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8094         PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
8095                         num);
8096
8097         if (num == 0) {
8098                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
8099                 return -ENOTSUP;
8100         }
8101
8102         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
8103                 if (j == num)
8104                         j = 0;
8105                 lut = (lut << 8) | (j & ((0x1 <<
8106                         hw->func_caps.rss_table_entry_width) - 1));
8107                 if ((i & 3) == 3)
8108                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
8109         }
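        /*
         * Worked example of the packing above (illustrative): with num = 4,
         * the LUT entries 0, 1, 2, 3 are shifted into 'lut' one byte at a
         * time, so I40E_PFQF_HLUT(0) is written with 0x00010203; queue
         * indices then repeat modulo num across the whole RSS table.
         */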
8110
8111         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
8112         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
8113                 i40e_pf_disable_rss(pf);
8114                 return 0;
8115         }
8116         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
8117                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
8118                 /* Default hash key, randomly generated */
8119                 static uint32_t rss_key_default[] = {0x6b793944,
8120                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8121                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8122                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8123
8124                 rss_conf.rss_key = (uint8_t *)rss_key_default;
8125                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8126                                                         sizeof(uint32_t);
8127         }
8128
8129         return i40e_hw_rss_hash_set(pf, &rss_conf);
8130 }
8131
8132 static int
8133 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
8134                                struct rte_eth_tunnel_filter_conf *filter)
8135 {
8136         if (pf == NULL || filter == NULL) {
8137                 PMD_DRV_LOG(ERR, "Invalid parameter");
8138                 return -EINVAL;
8139         }
8140
8141         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
8142                 PMD_DRV_LOG(ERR, "Invalid queue ID");
8143                 return -EINVAL;
8144         }
8145
8146         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
8147                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
8148                 return -EINVAL;
8149         }
8150
8151         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
8152                 (is_zero_ether_addr(&filter->outer_mac))) {
8153                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
8154                 return -EINVAL;
8155         }
8156
8157         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
8158                 (is_zero_ether_addr(&filter->inner_mac))) {
8159                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
8160                 return -EINVAL;
8161         }
8162
8163         return 0;
8164 }
8165
8166 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8167 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
8168 static int
8169 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8170 {
8171         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8172         uint32_t val, reg;
8173         int ret = -EINVAL;
8174
8175         if (pf->support_multi_driver) {
8176                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8177                 return -ENOTSUP;
8178         }
8179
8180         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8181         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8182
8183         if (len == 3) {
8184                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8185         } else if (len == 4) {
8186                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8187         } else {
8188                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8189                 return ret;
8190         }
8191
8192         if (reg != val) {
8193                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
8194                                                    reg, NULL);
8195                 if (ret != 0)
8196                         return ret;
8197                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
8198                             "with value 0x%08x",
8199                             I40E_GL_PRS_FVBM(2), reg);
8200                 i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
8201         } else {
8202                 ret = 0;
8203         }
8204         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8205                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8206
8207         return ret;
8208 }
8209
8210 static int
8211 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
8212 {
8213         int ret = -EINVAL;
8214
8215         if (!hw || !cfg)
8216                 return -EINVAL;
8217
8218         switch (cfg->cfg_type) {
8219         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
8220                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
8221                 break;
8222         default:
8223                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
8224                 break;
8225         }
8226
8227         return ret;
8228 }
8229
8230 static int
8231 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
8232                                enum rte_filter_op filter_op,
8233                                void *arg)
8234 {
8235         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8236         int ret = I40E_ERR_PARAM;
8237
8238         switch (filter_op) {
8239         case RTE_ETH_FILTER_SET:
8240                 ret = i40e_dev_global_config_set(hw,
8241                         (struct rte_eth_global_cfg *)arg);
8242                 break;
8243         default:
8244                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8245                 break;
8246         }
8247
8248         return ret;
8249 }
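/*
 * Illustrative sketch (assumed application usage): the GRE key length is
 * typically programmed through the generic filter-control entry point with
 * a global configuration argument, which lands in the handler above:
 *
 *     struct rte_eth_global_cfg cfg = {
 *             .cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN,
 *             .cfg.gre_key_len = 3,   // only lengths 3 and 4 are accepted
 *     };
 *
 *     ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NONE,
 *                                   RTE_ETH_FILTER_SET, &cfg);
 */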
8250
8251 static int
8252 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
8253                           enum rte_filter_op filter_op,
8254                           void *arg)
8255 {
8256         struct rte_eth_tunnel_filter_conf *filter;
8257         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8258         int ret = I40E_SUCCESS;
8259
8260         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
8261
8262         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
8263                 return I40E_ERR_PARAM;
8264
8265         switch (filter_op) {
8266         case RTE_ETH_FILTER_NOP:
8267                 if (!(pf->flags & I40E_FLAG_VXLAN))
8268                         ret = I40E_NOT_SUPPORTED;
8269                 break;
8270         case RTE_ETH_FILTER_ADD:
8271                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
8272                 break;
8273         case RTE_ETH_FILTER_DELETE:
8274                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
8275                 break;
8276         default:
8277                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8278                 ret = I40E_ERR_PARAM;
8279                 break;
8280         }
8281
8282         return ret;
8283 }
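/*
 * Illustrative sketch (application side): a VXLAN tunnel filter matching
 * inner MAC plus tenant ID could be installed through the filter-control
 * API handled above; the concrete values are assumptions.
 *
 *     struct rte_eth_tunnel_filter_conf conf = {
 *             .tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
 *             .filter_type = RTE_TUNNEL_FILTER_IMAC_TENID,
 *             .tenant_id = 100,       // VNI to match
 *             .queue_id = 3,          // destination Rx queue
 *     };
 *     // conf.inner_mac must also be filled in for an IMAC match
 *
 *     ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
 *                                   RTE_ETH_FILTER_ADD, &conf);
 */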
8284
8285 static int
8286 i40e_pf_config_mq_rx(struct i40e_pf *pf)
8287 {
8288         int ret = 0;
8289         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8290
8291         /* RSS setup */
8292         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
8293                 ret = i40e_pf_config_rss(pf);
8294         else
8295                 i40e_pf_disable_rss(pf);
8296
8297         return ret;
8298 }
8299
8300 /* Get the symmetric hash enable configurations per port */
8301 static void
8302 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
8303 {
8304         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8305
8306         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
8307 }
8308
8309 /* Set the symmetric hash enable configurations per port */
8310 static void
8311 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8312 {
8313         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8314
8315         if (enable > 0) {
8316                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
8317                         PMD_DRV_LOG(INFO,
8318                                 "Symmetric hash has already been enabled");
8319                         return;
8320                 }
8321                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8322         } else {
8323                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
8324                         PMD_DRV_LOG(INFO,
8325                                 "Symmetric hash has already been disabled");
8326                         return;
8327                 }
8328                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8329         }
8330         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8331         I40E_WRITE_FLUSH(hw);
8332 }
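/*
 * Illustrative sketch (assumed application usage): per-port symmetric
 * hashing is usually toggled via the hash filter info structure of the
 * filter-control API, which reaches the setter above:
 *
 *     struct rte_eth_hash_filter_info info = {
 *             .info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT,
 *             .info.enable = 1,
 *     };
 *
 *     ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *                                   RTE_ETH_FILTER_SET, &info);
 */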
8333
8334 /*
8335  * Get global configurations of hash function type and symmetric hash enable
8336  * per flow type (pctype). Note that the global configuration affects all
8337  * the ports on the same NIC.
8338  */
8339 static int
8340 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
8341                                    struct rte_eth_hash_global_conf *g_cfg)
8342 {
8343         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8344         uint32_t reg;
8345         uint16_t i, j;
8346
8347         memset(g_cfg, 0, sizeof(*g_cfg));
8348         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8349         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
8350                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
8351         else
8352                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
8353         PMD_DRV_LOG(DEBUG, "Hash function is %s",
8354                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
8355
8356         /*
8357          * As i40e supports fewer than 64 flow types, only the first 64 bits
8358          * need to be checked.
8359          */
8360         for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8361                 g_cfg->valid_bit_mask[i] = 0ULL;
8362                 g_cfg->sym_hash_enable_mask[i] = 0ULL;
8363         }
8364
8365         g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
8366
8367         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
8368                 if (!adapter->pctypes_tbl[i])
8369                         continue;
8370                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8371                      j < I40E_FILTER_PCTYPE_MAX; j++) {
8372                         if (adapter->pctypes_tbl[i] & (1ULL << j)) {
8373                                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
8374                                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8375                                         g_cfg->sym_hash_enable_mask[0] |=
8376                                                                 (1ULL << i);
8377                                 }
8378                         }
8379                 }
8380         }
8381
8382         return 0;
8383 }
8384
8385 static int
8386 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
8387                               const struct rte_eth_hash_global_conf *g_cfg)
8388 {
8389         uint32_t i;
8390         uint64_t mask0, i40e_mask = adapter->flow_types_mask;
8391
8392         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
8393                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
8394                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
8395                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
8396                                                 g_cfg->hash_func);
8397                 return -EINVAL;
8398         }
8399
8400         /*
8401          * As i40e supports fewer than 64 flow types, only the first 64 bits
8402          * need to be checked.
8403          */
8404         mask0 = g_cfg->valid_bit_mask[0];
8405         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8406                 if (i == 0) {
8407                         /* Check if any unsupported flow type is configured */
8408                         if ((mask0 | i40e_mask) ^ i40e_mask)
8409                                 goto mask_err;
8410                 } else {
8411                         if (g_cfg->valid_bit_mask[i])
8412                                 goto mask_err;
8413                 }
8414         }
8415
8416         return 0;
8417
8418 mask_err:
8419         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
8420
8421         return -EINVAL;
8422 }
8423
8424 /*
8425  * Set global configurations of hash function type and symmetric hash enable
8426  * per flow type (pctype). Note that any modification of the global
8427  * configuration will affect all the ports on the same NIC.
8428  */
8429 static int
8430 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
8431                                    struct rte_eth_hash_global_conf *g_cfg)
8432 {
8433         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8434         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8435         int ret;
8436         uint16_t i, j;
8437         uint32_t reg;
8438         uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
8439
8440         if (pf->support_multi_driver) {
8441                 PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
8442                 return -ENOTSUP;
8443         }
8444
8445         /* Check the input parameters */
8446         ret = i40e_hash_global_config_check(adapter, g_cfg);
8447         if (ret < 0)
8448                 return ret;
8449
8450         /*
8451          * As i40e supports fewer than 64 flow types, only the first 64 bits
8452          * need to be configured.
8453          */
8454         for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
8455                 if (mask0 & (1UL << i)) {
8456                         reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
8457                                         I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
8458
8459                         for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8460                              j < I40E_FILTER_PCTYPE_MAX; j++) {
8461                                 if (adapter->pctypes_tbl[i] & (1ULL << j))
8462                                         i40e_write_global_rx_ctl(hw,
8463                                                           I40E_GLQF_HSYM(j),
8464                                                           reg);
8465                         }
8466                         i40e_global_cfg_warning(I40E_WARNING_HSYM);
8467                 }
8468         }
8469
8470         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8471         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
8472                 /* Toeplitz */
8473                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
8474                         PMD_DRV_LOG(DEBUG,
8475                                 "Hash function already set to Toeplitz");
8476                         goto out;
8477                 }
8478                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
8479         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
8480                 /* Simple XOR */
8481                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
8482                         PMD_DRV_LOG(DEBUG,
8483                                 "Hash function already set to Simple XOR");
8484                         goto out;
8485                 }
8486                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
8487         } else
8488                 /* Use the default, and keep it as it is */
8489                 goto out;
8490
8491         i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
8492         i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
8493
8494 out:
8495         I40E_WRITE_FLUSH(hw);
8496
8497         return 0;
8498 }
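/*
 * Illustrative sketch (assumed application usage): selecting the Simple XOR
 * hash function and enabling symmetric hashing for one flow type through
 * the global hash configuration handled above:
 *
 *     struct rte_eth_hash_filter_info info = {
 *             .info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG,
 *     };
 *
 *     info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
 *     info.info.global_conf.valid_bit_mask[0] =
 *             1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 *     info.info.global_conf.sym_hash_enable_mask[0] =
 *             1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 *
 *     ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *                                   RTE_ETH_FILTER_SET, &info);
 */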
8499
8500 /**
8501  * Valid input sets for hash and flow director filters per PCTYPE
8502  */
8503 static uint64_t
8504 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
8505                 enum rte_filter_type filter)
8506 {
8507         uint64_t valid;
8508
8509         static const uint64_t valid_hash_inset_table[] = {
8510                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8511                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8512                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8513                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
8514                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
8515                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8516                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8517                         I40E_INSET_FLEX_PAYLOAD,
8518                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8519                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8520                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8521                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8522                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8523                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8524                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8525                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8526                         I40E_INSET_FLEX_PAYLOAD,
8527                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8528                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8529                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8530                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8531                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8532                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8533                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8534                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8535                         I40E_INSET_FLEX_PAYLOAD,
8536                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8537                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8538                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8539                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8540                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8541                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8542                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8543                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8544                         I40E_INSET_FLEX_PAYLOAD,
8545                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8546                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8547                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8548                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8549                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8550                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8551                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8552                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8553                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8554                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8555                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8556                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8557                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8558                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8559                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8560                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8561                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8562                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8563                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8564                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8565                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8566                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8567                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8568                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8569                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8570                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8571                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
8572                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8573                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8574                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8575                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8576                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8577                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8578                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8579                         I40E_INSET_FLEX_PAYLOAD,
8580                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8581                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8582                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8583                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8584                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8585                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
8586                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
8587                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
8588                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8589                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8590                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8591                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8592                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8593                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8594                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8595                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
8596                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8597                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8598                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8599                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8600                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8601                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8602                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8603                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8604                         I40E_INSET_FLEX_PAYLOAD,
8605                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8606                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8607                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8608                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8609                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8610                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8611                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8612                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8613                         I40E_INSET_FLEX_PAYLOAD,
8614                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8615                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8616                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8617                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8618                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8619                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8620                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8621                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8622                         I40E_INSET_FLEX_PAYLOAD,
8623                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8624                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8625                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8626                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8627                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8628                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8629                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8630                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8631                         I40E_INSET_FLEX_PAYLOAD,
8632                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8633                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8634                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8635                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8636                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8637                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8638                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8639                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
8640                         I40E_INSET_FLEX_PAYLOAD,
8641                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8642                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8643                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8644                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8645                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8646                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8647                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
8648                         I40E_INSET_FLEX_PAYLOAD,
8649                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8650                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8651                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8652                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
8653                         I40E_INSET_FLEX_PAYLOAD,
8654         };
8655
8656         /**
8657          * Flow director supports only fields defined in
8658          * union rte_eth_fdir_flow.
8659          */
8660         static const uint64_t valid_fdir_inset_table[] = {
8661                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8662                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8663                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8664                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8665                 I40E_INSET_IPV4_TTL,
8666                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8667                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8668                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8669                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8670                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8671                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8672                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8673                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8674                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8675                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8676                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8677                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8678                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8679                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8680                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8681                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8682                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8683                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8684                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8685                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8686                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8687                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8688                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8689                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8690                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8691                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8692                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8693                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8694                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8695                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8696                 I40E_INSET_SCTP_VT,
8697                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8698                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8699                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8700                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8701                 I40E_INSET_IPV4_TTL,
8702                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8703                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8704                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8705                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8706                 I40E_INSET_IPV6_HOP_LIMIT,
8707                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8708                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8709                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8710                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8711                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8712                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8713                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8714                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8715                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8716                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8717                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8718                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8719                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8720                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8721                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8722                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8723                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8724                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8725                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8726                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8727                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8728                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8729                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8730                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8731                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8732                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8733                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8734                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8735                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8736                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8737                 I40E_INSET_SCTP_VT,
8738                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8739                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8740                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8741                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8742                 I40E_INSET_IPV6_HOP_LIMIT,
8743                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8744                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8745                 I40E_INSET_LAST_ETHER_TYPE,
8746         };
8747
8748         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8749                 return 0;
8750         if (filter == RTE_ETH_FILTER_HASH)
8751                 valid = valid_hash_inset_table[pctype];
8752         else
8753                 valid = valid_fdir_inset_table[pctype];
8754
8755         return valid;
8756 }
8757
8758 /**
8759  * Validate if the input set is allowed for a specific PCTYPE
8760  */
8761 int
8762 i40e_validate_input_set(enum i40e_filter_pctype pctype,
8763                 enum rte_filter_type filter, uint64_t inset)
8764 {
8765         uint64_t valid;
8766
8767         valid = i40e_get_valid_input_set(pctype, filter);
8768         if (inset & (~valid))
8769                 return -EINVAL;
8770
8771         return 0;
8772 }
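/*
 * Worked example (derived from the tables above): for pctype
 * NONF_IPV4_UDP with filter == RTE_ETH_FILTER_FDIR, an inset of
 * I40E_INSET_IPV4_SRC | I40E_INSET_SRC_PORT is a subset of the valid
 * set and returns 0, while adding I40E_INSET_IPV6_TC falls outside it
 * and returns -EINVAL.
 */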
8773
8774 /* Default combination of input set fields per PCTYPE */
8775 uint64_t
8776 i40e_get_default_input_set(uint16_t pctype)
8777 {
8778         static const uint64_t default_inset_table[] = {
8779                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8780                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8781                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8782                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8783                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8784                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8785                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8786                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8787                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8788                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8789                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8790                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8791                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8792                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8793                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8794                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8795                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8796                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8797                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8798                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8799                         I40E_INSET_SCTP_VT,
8800                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8801                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8802                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8803                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8804                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8805                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8806                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8807                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8808                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8809                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8810                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8811                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8812                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8813                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8814                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8815                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8816                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8817                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8818                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8819                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8820                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8821                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8822                         I40E_INSET_SCTP_VT,
8823                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8824                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8825                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8826                         I40E_INSET_LAST_ETHER_TYPE,
8827         };
8828
8829         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8830                 return 0;
8831
8832         return default_inset_table[pctype];
8833 }
8834
8835 /**
8836  * Parse the input set from index to logical bit masks
8837  */
8838 static int
8839 i40e_parse_input_set(uint64_t *inset,
8840                      enum i40e_filter_pctype pctype,
8841                      enum rte_eth_input_set_field *field,
8842                      uint16_t size)
8843 {
8844         uint16_t i, j;
8845         int ret = -EINVAL;
8846
8847         static const struct {
8848                 enum rte_eth_input_set_field field;
8849                 uint64_t inset;
8850         } inset_convert_table[] = {
8851                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
8852                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
8853                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
8854                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
8855                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
8856                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
8857                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
8858                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
8859                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
8860                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
8861                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
8862                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
8863                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
8864                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
8865                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
8866                         I40E_INSET_IPV6_NEXT_HDR},
8867                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
8868                         I40E_INSET_IPV6_HOP_LIMIT},
8869                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
8870                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
8871                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
8872                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
8873                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
8874                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
8875                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
8876                         I40E_INSET_SCTP_VT},
8877                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
8878                         I40E_INSET_TUNNEL_DMAC},
8879                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
8880                         I40E_INSET_VLAN_TUNNEL},
8881                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
8882                         I40E_INSET_TUNNEL_ID},
8883                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
8884                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
8885                         I40E_INSET_FLEX_PAYLOAD_W1},
8886                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
8887                         I40E_INSET_FLEX_PAYLOAD_W2},
8888                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
8889                         I40E_INSET_FLEX_PAYLOAD_W3},
8890                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
8891                         I40E_INSET_FLEX_PAYLOAD_W4},
8892                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
8893                         I40E_INSET_FLEX_PAYLOAD_W5},
8894                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
8895                         I40E_INSET_FLEX_PAYLOAD_W6},
8896                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
8897                         I40E_INSET_FLEX_PAYLOAD_W7},
8898                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
8899                         I40E_INSET_FLEX_PAYLOAD_W8},
8900         };
8901
8902         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
8903                 return ret;
8904
8905         /* Only a single item is allowed for DEFAULT or NONE */
8906         if (size == 1) {
8907                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
8908                         *inset = i40e_get_default_input_set(pctype);
8909                         return 0;
8910                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
8911                         *inset = I40E_INSET_NONE;
8912                         return 0;
8913                 }
8914         }
8915
8916         for (i = 0, *inset = 0; i < size; i++) {
8917                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
8918                         if (field[i] == inset_convert_table[j].field) {
8919                                 *inset |= inset_convert_table[j].inset;
8920                                 break;
8921                         }
8922                 }
8923
8924                 /* An unsupported input set field was found; return immediately */
8925                 if (j == RTE_DIM(inset_convert_table))
8926                         return ret;
8927         }
8928
8929         return 0;
8930 }
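/*
 * Illustrative sketch (assumed application usage): selecting the source
 * IPv4 address as the only hash input for one flow type; the field array
 * ends up being parsed by the function above:
 *
 *     struct rte_eth_hash_filter_info info = {
 *             .info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT,
 *     };
 *
 *     info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *     info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
 *     info.info.input_set_conf.inset_size = 1;
 *     info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
 *
 *     ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *                                   RTE_ETH_FILTER_SET, &info);
 */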
8931
8932 /**
8933  * Translate the input set from logical bit masks to register-aware
8934  * bit masks
8935  */
8936 uint64_t
8937 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
8938 {
8939         uint64_t val = 0;
8940         uint16_t i;
8941
8942         struct inset_map {
8943                 uint64_t inset;
8944                 uint64_t inset_reg;
8945         };
8946
8947         static const struct inset_map inset_map_common[] = {
8948                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
8949                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
8950                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
8951                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
8952                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
8953                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
8954                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
8955                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
8956                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
8957                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
8958                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
8959                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
8960                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
8961                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
8962                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
8963                 {I40E_INSET_TUNNEL_DMAC,
8964                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
8965                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
8966                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
8967                 {I40E_INSET_TUNNEL_SRC_PORT,
8968                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
8969                 {I40E_INSET_TUNNEL_DST_PORT,
8970                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
8971                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
8972                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
8973                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
8974                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
8975                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
8976                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
8977                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
8978                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
8979                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
8980         };
8981
8982         /* Some registers are mapped differently on the X722 */
8983         static const struct inset_map inset_map_diff_x722[] = {
8984                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
8985                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
8986                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
8987                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
8988         };
8989
8990         static const struct inset_map inset_map_diff_not_x722[] = {
8991                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
8992                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
8993                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
8994                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
8995         };
8996
8997         if (input == 0)
8998                 return val;
8999
9000         /* Translate input set to register aware inset */
9001         if (type == I40E_MAC_X722) {
9002                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9003                         if (input & inset_map_diff_x722[i].inset)
9004                                 val |= inset_map_diff_x722[i].inset_reg;
9005                 }
9006         } else {
9007                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9008                         if (input & inset_map_diff_not_x722[i].inset)
9009                                 val |= inset_map_diff_not_x722[i].inset_reg;
9010                 }
9011         }
9012
9013         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9014                 if (input & inset_map_common[i].inset)
9015                         val |= inset_map_common[i].inset_reg;
9016         }
9017
9018         return val;
9019 }
9020
9021 int
9022 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9023 {
9024         uint8_t i, idx = 0;
9025         uint64_t inset_need_mask = inset;
9026
9027         static const struct {
9028                 uint64_t inset;
9029                 uint32_t mask;
9030         } inset_mask_map[] = {
9031                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
9032                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
9033                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
9034                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
9035                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
9036                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
9037                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
9038                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
9039         };
9040
9041         if (!inset || !mask || !nb_elem)
9042                 return 0;
9043
9044         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9045                 /* Clear the inset bits for which no mask is required,
9046                  * e.g. when both proto and ttl are selected.
9047                  */
9048                 if ((inset & inset_mask_map[i].inset) ==
9049                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
9050                         inset_need_mask &= ~inset_mask_map[i].inset;
9051                 if (!inset_need_mask)
9052                         return 0;
9053         }
9054         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9055                 if ((inset_need_mask & inset_mask_map[i].inset) ==
9056                     inset_mask_map[i].inset) {
9057                         if (idx >= nb_elem) {
9058                                 PMD_DRV_LOG(ERR, "exceeded the maximum number of bitmasks");
9059                                 return -EINVAL;
9060                         }
9061                         mask[idx] = inset_mask_map[i].mask;
9062                         idx++;
9063                 }
9064         }
9065
9066         return idx;
9067 }
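
/*
 * Illustrative sketch (compiled out, never built): how the two helpers
 * above are typically combined when programming an input set, mirroring
 * the flow of i40e_filter_input_set_init() below.  The function name and
 * the chosen input_set value are hypothetical, not part of the driver.
 */
#if 0
static void
example_program_fdir_inset(struct i40e_hw *hw, enum i40e_filter_pctype pctype)
{
        uint64_t input_set = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_TTL;
        uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
        uint64_t inset_reg;
        int num, i;

        /* Derive the per-field mask registers first ... */
        num = i40e_generate_inset_mask_reg(input_set, mask_reg,
                                           I40E_INSET_MASK_NUM_REG);
        if (num < 0)
                return;

        /* ... then translate the abstract inset into the register layout. */
        inset_reg = i40e_translate_input_set_reg(hw->mac.type, input_set);

        i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
                             (uint32_t)(inset_reg & UINT32_MAX));
        i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
                             (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) &
                                        UINT32_MAX));
        for (i = 0; i < num; i++)
                i40e_check_write_global_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
                                            mask_reg[i]);
        I40E_WRITE_FLUSH(hw);
}
#endif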
9068
9069 void
9070 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9071 {
9072         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9073
9074         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9075         if (reg != val)
9076                 i40e_write_rx_ctl(hw, addr, val);
9077         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9078                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9079 }
9080
9081 void
9082 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9083 {
9084         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9085
9086         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9087         if (reg != val)
9088                 i40e_write_global_rx_ctl(hw, addr, val);
9089         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9090                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9091 }
9092
9093 static void
9094 i40e_filter_input_set_init(struct i40e_pf *pf)
9095 {
9096         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9097         enum i40e_filter_pctype pctype;
9098         uint64_t input_set, inset_reg;
9099         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9100         int num, i;
9101         uint16_t flow_type;
9102
9103         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9104              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9105                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9106
9107                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9108                         continue;
9109
9110                 input_set = i40e_get_default_input_set(pctype);
9111
9112                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9113                                                    I40E_INSET_MASK_NUM_REG);
9114                 if (num < 0)
9115                         return;
9116                 if (pf->support_multi_driver && num > 0) {
9117                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9118                         return;
9119                 }
9120                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9121                                         input_set);
9122
9123                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9124                                       (uint32_t)(inset_reg & UINT32_MAX));
9125                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9126                                      (uint32_t)((inset_reg >>
9127                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
9128                 if (!pf->support_multi_driver) {
9129                         i40e_check_write_global_reg(hw,
9130                                             I40E_GLQF_HASH_INSET(0, pctype),
9131                                             (uint32_t)(inset_reg & UINT32_MAX));
9132                         i40e_check_write_global_reg(hw,
9133                                              I40E_GLQF_HASH_INSET(1, pctype),
9134                                              (uint32_t)((inset_reg >>
9135                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
9136
9137                         for (i = 0; i < num; i++) {
9138                                 i40e_check_write_global_reg(hw,
9139                                                     I40E_GLQF_FD_MSK(i, pctype),
9140                                                     mask_reg[i]);
9141                                 i40e_check_write_global_reg(hw,
9142                                                   I40E_GLQF_HASH_MSK(i, pctype),
9143                                                   mask_reg[i]);
9144                         }
9145                         /* Clear unused mask registers of the pctype */
9146                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9147                                 i40e_check_write_global_reg(hw,
9148                                                     I40E_GLQF_FD_MSK(i, pctype),
9149                                                     0);
9150                                 i40e_check_write_global_reg(hw,
9151                                                   I40E_GLQF_HASH_MSK(i, pctype),
9152                                                   0);
9153                         }
9154                 } else {
9155                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9156                 }
9157                 I40E_WRITE_FLUSH(hw);
9158
9159                 /* store the default input set */
9160                 if (!pf->support_multi_driver)
9161                         pf->hash_input_set[pctype] = input_set;
9162                 pf->fdir.input_set[pctype] = input_set;
9163         }
9164
9165         if (!pf->support_multi_driver) {
9166                 i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9167                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9168                 i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9169         }
9170 }
9171
9172 int
9173 i40e_hash_filter_inset_select(struct i40e_hw *hw,
9174                          struct rte_eth_input_set_conf *conf)
9175 {
9176         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9177         enum i40e_filter_pctype pctype;
9178         uint64_t input_set, inset_reg = 0;
9179         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9180         int ret, i, num;
9181
9182         if (!conf) {
9183                 PMD_DRV_LOG(ERR, "Invalid pointer");
9184                 return -EFAULT;
9185         }
9186         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9187             conf->op != RTE_ETH_INPUT_SET_ADD) {
9188                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9189                 return -EINVAL;
9190         }
9191
9192         if (pf->support_multi_driver) {
9193                 PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
9194                 return -ENOTSUP;
9195         }
9196
9197         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9198         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9199                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9200                 return -EINVAL;
9201         }
9202
9203         if (hw->mac.type == I40E_MAC_X722) {
9204                 /* get translated pctype value in fd pctype register */
9205                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
9206                         I40E_GLQF_FD_PCTYPES((int)pctype));
9207         }
9208
9209         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9210                                    conf->inset_size);
9211         if (ret) {
9212                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9213                 return -EINVAL;
9214         }
9215
9216         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
9217                 /* get inset value in register */
9218                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9219                 inset_reg <<= I40E_32_BIT_WIDTH;
9220                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9221                 input_set |= pf->hash_input_set[pctype];
9222         }
9223         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9224                                            I40E_INSET_MASK_NUM_REG);
9225         if (num < 0)
9226                 return -EINVAL;
9227
9228         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9229
9230         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9231                                     (uint32_t)(inset_reg & UINT32_MAX));
9232         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9233                                     (uint32_t)((inset_reg >>
9234                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
9235         i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9236
9237         for (i = 0; i < num; i++)
9238                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9239                                             mask_reg[i]);
9240         /* Clear unused mask registers of the pctype */
9241         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9242                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9243                                             0);
9244         i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9245         I40E_WRITE_FLUSH(hw);
9246
9247         pf->hash_input_set[pctype] = input_set;
9248         return 0;
9249 }
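
/*
 * Illustrative sketch (compiled out, never built): building the
 * rte_eth_input_set_conf consumed by i40e_hash_filter_inset_select()
 * above.  This would make the RSS hash for ipv4-udp flows depend on the
 * IPv4 addresses only; the function name is hypothetical.
 */
#if 0
static int
example_select_hash_inset(struct i40e_hw *hw)
{
        struct rte_eth_input_set_conf conf = {
                .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
                .inset_size = 2,
                .field = { RTE_ETH_INPUT_SET_L3_SRC_IP4,
                           RTE_ETH_INPUT_SET_L3_DST_IP4 },
                .op = RTE_ETH_INPUT_SET_SELECT, /* replace, don't extend */
        };

        return i40e_hash_filter_inset_select(hw, &conf);
}
#endif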
9250
9251 int
9252 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
9253                          struct rte_eth_input_set_conf *conf)
9254 {
9255         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9256         enum i40e_filter_pctype pctype;
9257         uint64_t input_set, inset_reg = 0;
9258         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9259         int ret, i, num;
9260
9261         if (!hw || !conf) {
9262                 PMD_DRV_LOG(ERR, "Invalid pointer");
9263                 return -EFAULT;
9264         }
9265         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9266             conf->op != RTE_ETH_INPUT_SET_ADD) {
9267                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9268                 return -EINVAL;
9269         }
9270
9271         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9272
9273         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9274                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9275                 return -EINVAL;
9276         }
9277
9278         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9279                                    conf->inset_size);
9280         if (ret) {
9281                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9282                 return -EINVAL;
9283         }
9284
9285         /* get inset value in register */
9286         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
9287         inset_reg <<= I40E_32_BIT_WIDTH;
9288         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
9289
9290         /* The inset reg for flex payload cannot be changed for fdir here;
9291          * it is done by writing I40E_PRTQF_FD_FLXINSET
9292          * in i40e_set_flex_mask_on_pctype.
9293          */
9294         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
9295                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
9296         else
9297                 input_set |= pf->fdir.input_set[pctype];
9298         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9299                                            I40E_INSET_MASK_NUM_REG);
9300         if (num < 0)
9301                 return -EINVAL;
9302         if (pf->support_multi_driver && num > 0) {
9303                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9304                 return -ENOTSUP;
9305         }
9306
9307         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9308
9309         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9310                               (uint32_t)(inset_reg & UINT32_MAX));
9311         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9312                              (uint32_t)((inset_reg >>
9313                              I40E_32_BIT_WIDTH) & UINT32_MAX));
9314
9315         if (!pf->support_multi_driver) {
9316                 for (i = 0; i < num; i++)
9317                         i40e_check_write_global_reg(hw,
9318                                                     I40E_GLQF_FD_MSK(i, pctype),
9319                                                     mask_reg[i]);
9320                 /* Clear unused mask registers of the pctype */
9321                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9322                         i40e_check_write_global_reg(hw,
9323                                                     I40E_GLQF_FD_MSK(i, pctype),
9324                                                     0);
9325                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9326         } else {
9327                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9328         }
9329         I40E_WRITE_FLUSH(hw);
9330
9331         pf->fdir.input_set[pctype] = input_set;
9332         return 0;
9333 }
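
/*
 * Illustrative sketch (compiled out, never built): RTE_ETH_INPUT_SET_ADD
 * extends the current fdir input set instead of replacing it, as handled
 * above.  Here the UDP destination port is added on top of whatever is
 * already configured for ipv4-udp; the function name is hypothetical.
 */
#if 0
static int
example_extend_fdir_inset(struct i40e_pf *pf)
{
        struct rte_eth_input_set_conf conf = {
                .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
                .inset_size = 1,
                .field = { RTE_ETH_INPUT_SET_L4_UDP_DST_PORT },
                .op = RTE_ETH_INPUT_SET_ADD,
        };

        return i40e_fdir_filter_inset_select(pf, &conf);
}
#endif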
9334
9335 static int
9336 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9337 {
9338         int ret = 0;
9339
9340         if (!hw || !info) {
9341                 PMD_DRV_LOG(ERR, "Invalid pointer");
9342                 return -EFAULT;
9343         }
9344
9345         switch (info->info_type) {
9346         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9347                 i40e_get_symmetric_hash_enable_per_port(hw,
9348                                         &(info->info.enable));
9349                 break;
9350         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9351                 ret = i40e_get_hash_filter_global_config(hw,
9352                                 &(info->info.global_conf));
9353                 break;
9354         default:
9355                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9356                                                         info->info_type);
9357                 ret = -EINVAL;
9358                 break;
9359         }
9360
9361         return ret;
9362 }
9363
9364 static int
9365 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9366 {
9367         int ret = 0;
9368
9369         if (!hw || !info) {
9370                 PMD_DRV_LOG(ERR, "Invalid pointer");
9371                 return -EFAULT;
9372         }
9373
9374         switch (info->info_type) {
9375         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9376                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
9377                 break;
9378         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9379                 ret = i40e_set_hash_filter_global_config(hw,
9380                                 &(info->info.global_conf));
9381                 break;
9382         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
9383                 ret = i40e_hash_filter_inset_select(hw,
9384                                                &(info->info.input_set_conf));
9385                 break;
9386
9387         default:
9388                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9389                                                         info->info_type);
9390                 ret = -EINVAL;
9391                 break;
9392         }
9393
9394         return ret;
9395 }
9396
9397 /* Operations for hash function */
9398 static int
9399 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
9400                       enum rte_filter_op filter_op,
9401                       void *arg)
9402 {
9403         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9404         int ret = 0;
9405
9406         switch (filter_op) {
9407         case RTE_ETH_FILTER_NOP:
9408                 break;
9409         case RTE_ETH_FILTER_GET:
9410                 ret = i40e_hash_filter_get(hw,
9411                         (struct rte_eth_hash_filter_info *)arg);
9412                 break;
9413         case RTE_ETH_FILTER_SET:
9414                 ret = i40e_hash_filter_set(hw,
9415                         (struct rte_eth_hash_filter_info *)arg);
9416                 break;
9417         default:
9418                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
9419                                                                 filter_op);
9420                 ret = -ENOTSUP;
9421                 break;
9422         }
9423
9424         return ret;
9425 }
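
/*
 * Illustrative sketch (compiled out, never built): how an application
 * reaches the hash filter ops above through the legacy filter API,
 * enabling symmetric hashing on the port.  The function name is
 * hypothetical.
 */
#if 0
static int
example_enable_symmetric_hash(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
        info.info.enable = 1;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}
#endif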
9426
9427 /* Convert ethertype filter structure */
9428 static int
9429 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9430                               struct i40e_ethertype_filter *filter)
9431 {
9432         rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
9433         filter->input.ether_type = input->ether_type;
9434         filter->flags = input->flags;
9435         filter->queue = input->queue;
9436
9437         return 0;
9438 }
9439
9440 /* Check if the ethertype filter exists */
9441 struct i40e_ethertype_filter *
9442 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9443                                 const struct i40e_ethertype_filter_input *input)
9444 {
9445         int ret;
9446
9447         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9448         if (ret < 0)
9449                 return NULL;
9450
9451         return ethertype_rule->hash_map[ret];
9452 }
9453
9454 /* Add ethertype filter in SW list */
9455 static int
9456 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9457                                 struct i40e_ethertype_filter *filter)
9458 {
9459         struct i40e_ethertype_rule *rule = &pf->ethertype;
9460         int ret;
9461
9462         ret = rte_hash_add_key(rule->hash_table, &filter->input);
9463         if (ret < 0) {
9464                 PMD_DRV_LOG(ERR,
9465                             "Failed to insert ethertype filter"
9466                             " into hash table, error %d!",
9467                             ret);
9468                 return ret;
9469         }
9470         rule->hash_map[ret] = filter;
9471
9472         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9473
9474         return 0;
9475 }
9476
9477 /* Delete ethertype filter in SW list */
9478 int
9479 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9480                              struct i40e_ethertype_filter_input *input)
9481 {
9482         struct i40e_ethertype_rule *rule = &pf->ethertype;
9483         struct i40e_ethertype_filter *filter;
9484         int ret;
9485
9486         ret = rte_hash_del_key(rule->hash_table, input);
9487         if (ret < 0) {
9488                 PMD_DRV_LOG(ERR,
9489                             "Failed to delete ethertype filter"
9490                             " from hash table, error %d!",
9491                             ret);
9492                 return ret;
9493         }
9494         filter = rule->hash_map[ret];
9495         rule->hash_map[ret] = NULL;
9496
9497         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9498         rte_free(filter);
9499
9500         return 0;
9501 }
9502
9503 /*
9504  * Configure an ethertype filter, which can direct packets by filtering
9505  * on MAC address plus ether_type, or on ether_type only
9506  */
9507 int
9508 i40e_ethertype_filter_set(struct i40e_pf *pf,
9509                         struct rte_eth_ethertype_filter *filter,
9510                         bool add)
9511 {
9512         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9513         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9514         struct i40e_ethertype_filter *ethertype_filter, *node;
9515         struct i40e_ethertype_filter check_filter;
9516         struct i40e_control_filter_stats stats;
9517         uint16_t flags = 0;
9518         int ret;
9519
9520         if (filter->queue >= pf->dev_data->nb_rx_queues) {
9521                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9522                 return -EINVAL;
9523         }
9524         if (filter->ether_type == ETHER_TYPE_IPv4 ||
9525                 filter->ether_type == ETHER_TYPE_IPv6) {
9526                 PMD_DRV_LOG(ERR,
9527                         "unsupported ether_type(0x%04x) in control packet filter.",
9528                         filter->ether_type);
9529                 return -EINVAL;
9530         }
9531         if (filter->ether_type == ETHER_TYPE_VLAN)
9532                 PMD_DRV_LOG(WARNING,
9533                         "filter vlan ether_type in first tag is not supported.");
9534
9535         /* Check if there is the filter in SW list */
9536         memset(&check_filter, 0, sizeof(check_filter));
9537         i40e_ethertype_filter_convert(filter, &check_filter);
9538         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9539                                                &check_filter.input);
9540         if (add && node) {
9541                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9542                 return -EINVAL;
9543         }
9544
9545         if (!add && !node) {
9546                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9547                 return -EINVAL;
9548         }
9549
9550         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9551                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9552         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9553                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9554         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9555
9556         memset(&stats, 0, sizeof(stats));
9557         ret = i40e_aq_add_rem_control_packet_filter(hw,
9558                         filter->mac_addr.addr_bytes,
9559                         filter->ether_type, flags,
9560                         pf->main_vsi->seid,
9561                         filter->queue, add, &stats, NULL);
9562
9563         PMD_DRV_LOG(INFO,
9564                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9565                 ret, stats.mac_etype_used, stats.etype_used,
9566                 stats.mac_etype_free, stats.etype_free);
9567         if (ret < 0)
9568                 return -ENOSYS;
9569
9570         /* Add or delete a filter in SW list */
9571         if (add) {
9572                 ethertype_filter = rte_zmalloc("ethertype_filter",
9573                                        sizeof(*ethertype_filter), 0);
9574                 if (ethertype_filter == NULL) {
9575                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9576                         return -ENOMEM;
9577                 }
9578
9579                 rte_memcpy(ethertype_filter, &check_filter,
9580                            sizeof(check_filter));
9581                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9582                 if (ret < 0)
9583                         rte_free(ethertype_filter);
9584         } else {
9585                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9586         }
9587
9588         return ret;
9589 }
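
/*
 * Illustrative sketch (compiled out, never built): an
 * rte_eth_ethertype_filter that steers all frames with a given
 * ether_type (LLDP, 0x88CC here) to RX queue 0, matching on ether_type
 * only.  The function name is hypothetical.
 */
#if 0
static int
example_add_lldp_filter(struct i40e_pf *pf)
{
        struct rte_eth_ethertype_filter filter;

        memset(&filter, 0, sizeof(filter));
        filter.ether_type = 0x88CC;     /* LLDP */
        filter.flags = 0;               /* no MAC match, no drop: to queue */
        filter.queue = 0;

        return i40e_ethertype_filter_set(pf, &filter, TRUE);
}
#endif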
9590
9591 /*
9592  * Handle operations for ethertype filter.
9593  */
9594 static int
9595 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
9596                                 enum rte_filter_op filter_op,
9597                                 void *arg)
9598 {
9599         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9600         int ret = 0;
9601
9602         if (filter_op == RTE_ETH_FILTER_NOP)
9603                 return ret;
9604
9605         if (arg == NULL) {
9606                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
9607                             filter_op);
9608                 return -EINVAL;
9609         }
9610
9611         switch (filter_op) {
9612         case RTE_ETH_FILTER_ADD:
9613                 ret = i40e_ethertype_filter_set(pf,
9614                         (struct rte_eth_ethertype_filter *)arg,
9615                         TRUE);
9616                 break;
9617         case RTE_ETH_FILTER_DELETE:
9618                 ret = i40e_ethertype_filter_set(pf,
9619                         (struct rte_eth_ethertype_filter *)arg,
9620                         FALSE);
9621                 break;
9622         default:
9623                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
9624                 ret = -ENOSYS;
9625                 break;
9626         }
9627         return ret;
9628 }
9629
9630 static int
9631 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
9632                      enum rte_filter_type filter_type,
9633                      enum rte_filter_op filter_op,
9634                      void *arg)
9635 {
9636         int ret = 0;
9637
9638         if (dev == NULL)
9639                 return -EINVAL;
9640
9641         switch (filter_type) {
9642         case RTE_ETH_FILTER_NONE:
9643                 /* For global configuration */
9644                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
9645                 break;
9646         case RTE_ETH_FILTER_HASH:
9647                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
9648                 break;
9649         case RTE_ETH_FILTER_MACVLAN:
9650                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
9651                 break;
9652         case RTE_ETH_FILTER_ETHERTYPE:
9653                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
9654                 break;
9655         case RTE_ETH_FILTER_TUNNEL:
9656                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
9657                 break;
9658         case RTE_ETH_FILTER_FDIR:
9659                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
9660                 break;
9661         case RTE_ETH_FILTER_GENERIC:
9662                 if (filter_op != RTE_ETH_FILTER_GET)
9663                         return -EINVAL;
9664                 *(const void **)arg = &i40e_flow_ops;
9665                 break;
9666         default:
9667                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9668                                                         filter_type);
9669                 ret = -EINVAL;
9670                 break;
9671         }
9672
9673         return ret;
9674 }
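
/*
 * Illustrative sketch (compiled out, never built): RTE_ETH_FILTER_GENERIC
 * does not carry a filter itself; as handled above, it hands back the
 * rte_flow ops table, which is how the rte_flow layer binds to this
 * driver.  The function name is hypothetical.
 */
#if 0
static const struct rte_flow_ops *
example_get_flow_ops(struct rte_eth_dev *dev)
{
        const struct rte_flow_ops *ops = NULL;

        if (i40e_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
                                 RTE_ETH_FILTER_GET, &ops) < 0)
                return NULL;

        return ops;
}
#endif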
9675
9676 /*
9677  * Check and enable Extended Tag.
9678  * Enabling Extended Tag is important for 40G performance.
9679  */
9680 static void
9681 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9682 {
9683         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9684         uint32_t buf = 0;
9685         int ret;
9686
9687         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9688                                       PCI_DEV_CAP_REG);
9689         if (ret < 0) {
9690                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9691                             PCI_DEV_CAP_REG);
9692                 return;
9693         }
9694         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9695                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9696                 return;
9697         }
9698
9699         buf = 0;
9700         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9701                                       PCI_DEV_CTRL_REG);
9702         if (ret < 0) {
9703                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9704                             PCI_DEV_CTRL_REG);
9705                 return;
9706         }
9707         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9708                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9709                 return;
9710         }
9711         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9712         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9713                                        PCI_DEV_CTRL_REG);
9714         if (ret < 0) {
9715                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9716                             PCI_DEV_CTRL_REG);
9717                 return;
9718         }
9719 }
9720
9721 /*
9722  * As some registers are not reset by anything short of a global hardware
9723  * reset, hardware initialization is needed to put those registers into an
9724  * expected initial state.
9725  */
9726 static void
9727 i40e_hw_init(struct rte_eth_dev *dev)
9728 {
9729         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9730
9731         i40e_enable_extended_tag(dev);
9732
9733         /* clear the PF Queue Filter control register */
9734         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9735
9736         /* Disable symmetric hash per port */
9737         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9738 }
9739
9740 /*
9741  * On X722 it is possible to have multiple pctypes mapped to the same
9742  * flowtype; however, this function returns only the single highest pctype
9743  * index, which is not quite correct. This is a known problem of the i40e
9744  * driver and needs to be fixed later.
9745  */
9746 enum i40e_filter_pctype
9747 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9748 {
9749         int i;
9750         uint64_t pctype_mask;
9751
9752         if (flow_type < I40E_FLOW_TYPE_MAX) {
9753                 pctype_mask = adapter->pctypes_tbl[flow_type];
9754                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9755                         if (pctype_mask & (1ULL << i))
9756                                 return (enum i40e_filter_pctype)i;
9757                 }
9758         }
9759         return I40E_FILTER_PCTYPE_INVALID;
9760 }
9761
9762 uint16_t
9763 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9764                         enum i40e_filter_pctype pctype)
9765 {
9766         uint16_t flowtype;
9767         uint64_t pctype_mask = 1ULL << pctype;
9768
9769         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9770              flowtype++) {
9771                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9772                         return flowtype;
9773         }
9774
9775         return RTE_ETH_FLOW_UNKNOWN;
9776 }
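
/*
 * Illustrative sketch (compiled out, never built): adapter->pctypes_tbl[]
 * is a per-flow-type bitmask of hardware pctypes, so the two translations
 * above are just bit scans over it.  A round trip recovers the original
 * flow type even where several pctypes share it; the function name is
 * hypothetical.
 */
#if 0
static int
example_flowtype_roundtrip(const struct i40e_adapter *adapter)
{
        enum i40e_filter_pctype pctype;

        pctype = i40e_flowtype_to_pctype(adapter,
                                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP);
        if (pctype == I40E_FILTER_PCTYPE_INVALID)
                return -1;

        return i40e_pctype_to_flowtype(adapter, pctype) ==
               RTE_ETH_FLOW_NONFRAG_IPV4_UDP ? 0 : -1;
}
#endif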
9777
9778 /*
9779  * On X710, performance numbers are far below expectation on recent firmware
9780  * versions; on XL710, performance numbers are likewise far below expectation
9781  * if promiscuous mode is disabled, or if promiscuous mode is enabled and the
9782  * port MAC address equals the packet destination MAC address. The fix for
9783  * this issue may not be integrated in the following firmware version, so a
9784  * workaround in the software driver is needed. It modifies the initial
9785  * values of 3 internal-only registers for both X710 and XL710. Note that the
9786  * values for X710 and XL710 could be different, and the workaround can be
9787  * removed once this is fixed in firmware.
9788  */
9789
9790 /* For both X710 and XL710 */
9791 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
9792 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
9793 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
9794
9795 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9796 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
9797
9798 /* For X722 */
9799 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
9800 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
9801
9802 /* For X710 */
9803 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
9804 /* For XL710 */
9805 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
9806 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
9807
9808 static int
9809 i40e_dev_sync_phy_type(struct i40e_hw *hw)
9810 {
9811         enum i40e_status_code status;
9812         struct i40e_aq_get_phy_abilities_resp phy_ab;
9813         int ret = -ENOTSUP;
9814         int retries = 0;
9815
9816         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
9817                                               NULL);
9818
9819         while (status) {
9820                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
9821                         status);
9822                 retries++;
9823                 rte_delay_us(100000);
9824                 if (retries < 5)
9825                         status = i40e_aq_get_phy_capabilities(hw, false,
9826                                         true, &phy_ab, NULL);
9827                 else
9828                         return ret;
9829         }
9830         return 0;
9831 }
9832
9833 static void
9834 i40e_configure_registers(struct i40e_hw *hw)
9835 {
9836         static struct {
9837                 uint32_t addr;
9838                 uint64_t val;
9839         } reg_table[] = {
9840                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
9841                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
9842                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
9843         };
9844         uint64_t reg;
9845         uint32_t i;
9846         int ret;
9847
9848         for (i = 0; i < RTE_DIM(reg_table); i++) {
9849                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
9850                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9851                                 reg_table[i].val =
9852                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
9853                         else /* For X710/XL710/XXV710 */
9854                                 if (hw->aq.fw_maj_ver < 6)
9855                                         reg_table[i].val =
9856                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
9857                                 else
9858                                         reg_table[i].val =
9859                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
9860                 }
9861
9862                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
9863                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9864                                 reg_table[i].val =
9865                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9866                         else /* For X710/XL710/XXV710 */
9867                                 reg_table[i].val =
9868                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9869                 }
9870
9871                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
9872                         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
9873                             I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
9874                                 reg_table[i].val =
9875                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
9876                         else /* For X710 */
9877                                 reg_table[i].val =
9878                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
9879                 }
9880
9881                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
9882                                                         &reg, NULL);
9883                 if (ret < 0) {
9884                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
9885                                                         reg_table[i].addr);
9886                         break;
9887                 }
9888                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
9889                                                 reg_table[i].addr, reg);
9890                 if (reg == reg_table[i].val)
9891                         continue;
9892
9893                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
9894                                                 reg_table[i].val, NULL);
9895                 if (ret < 0) {
9896                         PMD_DRV_LOG(ERR,
9897                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
9898                                 reg_table[i].val, reg_table[i].addr);
9899                         break;
9900                 }
9901                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
9902                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
9903         }
9904 }
9905
9906 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
9907 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
9908 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
9909 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
9910 static int
9911 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
9912 {
9913         uint32_t reg;
9914         int ret;
9915
9916         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
9917                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
9918                 return -EINVAL;
9919         }
9920
9921         /* Configure for double VLAN RX stripping */
9922         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
9923         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
9924                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
9925                 ret = i40e_aq_debug_write_register(hw,
9926                                                    I40E_VSI_TSR(vsi->vsi_id),
9927                                                    reg, NULL);
9928                 if (ret < 0) {
9929                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
9930                                     vsi->vsi_id);
9931                         return I40E_ERR_CONFIG;
9932                 }
9933         }
9934
9935         /* Configure for double VLAN TX insertion */
9936         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
9937         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
9938                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
9939                 ret = i40e_aq_debug_write_register(hw,
9940                                                    I40E_VSI_L2TAGSTXVALID(
9941                                                    vsi->vsi_id), reg, NULL);
9942                 if (ret < 0) {
9943                         PMD_DRV_LOG(ERR,
9944                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
9945                                 vsi->vsi_id);
9946                         return I40E_ERR_CONFIG;
9947                 }
9948         }
9949
9950         return 0;
9951 }
9952
9953 /**
9954  * i40e_aq_add_mirror_rule
9955  * @hw: pointer to the hardware structure
9956  * @seid: VEB seid to add mirror rule to
9957  * @dst_id: destination vsi seid
9958  * @entries: Buffer which contains the entities to be mirrored
9959  * @count: number of entities contained in the buffer
9960  * @rule_id: the rule_id of the rule to be added
9961  *
9962  * Add a mirror rule for a given VEB.
9963  *
9964  **/
9965 static enum i40e_status_code
9966 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
9967                         uint16_t seid, uint16_t dst_id,
9968                         uint16_t rule_type, uint16_t *entries,
9969                         uint16_t count, uint16_t *rule_id)
9970 {
9971         struct i40e_aq_desc desc;
9972         struct i40e_aqc_add_delete_mirror_rule cmd;
9973         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
9974                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
9975                 &desc.params.raw;
9976         uint16_t buff_len;
9977         enum i40e_status_code status;
9978
9979         i40e_fill_default_direct_cmd_desc(&desc,
9980                                           i40e_aqc_opc_add_mirror_rule);
9981         memset(&cmd, 0, sizeof(cmd));
9982
9983         buff_len = sizeof(uint16_t) * count;
9984         desc.datalen = rte_cpu_to_le_16(buff_len);
9985         if (buff_len > 0)
9986                 desc.flags |= rte_cpu_to_le_16(
9987                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
9988         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
9989                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
9990         cmd.num_entries = rte_cpu_to_le_16(count);
9991         cmd.seid = rte_cpu_to_le_16(seid);
9992         cmd.destination = rte_cpu_to_le_16(dst_id);
9993
9994         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
9995         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
9996         PMD_DRV_LOG(INFO,
9997                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
9998                 hw->aq.asq_last_status, resp->rule_id,
9999                 resp->mirror_rules_used, resp->mirror_rules_free);
10000         *rule_id = rte_le_to_cpu_16(resp->rule_id);
10001
10002         return status;
10003 }
10004
10005 /**
10006  * i40e_aq_del_mirror_rule
10007  * @hw: pointer to the hardware structure
10008  * @seid: VEB seid to delete mirror rule from
10009  * @entries: Buffer which contains the entities to be mirrored
10010  * @count: number of entities contained in the buffer
10011  * @rule_id: the rule_id of the rule to be deleted
10012  *
10013  * Delete a mirror rule for a given VEB.
10014  *
10015  **/
10016 static enum i40e_status_code
10017 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10018                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
10019                 uint16_t count, uint16_t rule_id)
10020 {
10021         struct i40e_aq_desc desc;
10022         struct i40e_aqc_add_delete_mirror_rule cmd;
10023         uint16_t buff_len = 0;
10024         enum i40e_status_code status;
10025         void *buff = NULL;
10026
10027         i40e_fill_default_direct_cmd_desc(&desc,
10028                                           i40e_aqc_opc_delete_mirror_rule);
10029         memset(&cmd, 0, sizeof(cmd));
10030         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10031                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10032                                                           I40E_AQ_FLAG_RD));
10033                 cmd.num_entries = count;
10034                 buff_len = sizeof(uint16_t) * count;
10035                 desc.datalen = rte_cpu_to_le_16(buff_len);
10036                 buff = (void *)entries;
10037         } else
10038                 /* rule id is filled in destination field for deleting mirror rule */
10039                 cmd.destination = rte_cpu_to_le_16(rule_id);
10040
10041         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10042                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10043         cmd.seid = rte_cpu_to_le_16(seid);
10044
10045         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10046         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10047
10048         return status;
10049 }
10050
10051 /**
10052  * i40e_mirror_rule_set
10053  * @dev: pointer to the device structure
10054  * @mirror_conf: mirror rule info
10055  * @sw_id: mirror rule's sw_id
10056  * @on: enable/disable
10057  *
10058  * Set a mirror rule.
10059  *
10060  **/
10061 static int
10062 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10063                         struct rte_eth_mirror_conf *mirror_conf,
10064                         uint8_t sw_id, uint8_t on)
10065 {
10066         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10067         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10068         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10069         struct i40e_mirror_rule *parent = NULL;
10070         uint16_t seid, dst_seid, rule_id;
10071         uint16_t i, j = 0;
10072         int ret;
10073
10074         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10075
10076         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10077                 PMD_DRV_LOG(ERR,
10078                         "mirror rule cannot be configured without VEB or VFs.");
10079                 return -ENOSYS;
10080         }
10081         if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
10082                 PMD_DRV_LOG(ERR, "mirror table is full.");
10083                 return -ENOSPC;
10084         }
10085         if (mirror_conf->dst_pool > pf->vf_num) {
10086                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10087                                  mirror_conf->dst_pool);
10088                 return -EINVAL;
10089         }
10090
10091         seid = pf->main_vsi->veb->seid;
10092
10093         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10094                 if (sw_id <= it->index) {
10095                         mirr_rule = it;
10096                         break;
10097                 }
10098                 parent = it;
10099         }
10100         if (mirr_rule && sw_id == mirr_rule->index) {
10101                 if (on) {
10102                         PMD_DRV_LOG(ERR, "mirror rule exists.");
10103                         return -EEXIST;
10104                 } else {
10105                         ret = i40e_aq_del_mirror_rule(hw, seid,
10106                                         mirr_rule->rule_type,
10107                                         mirr_rule->entries,
10108                                         mirr_rule->num_entries, mirr_rule->id);
10109                         if (ret < 0) {
10110                                 PMD_DRV_LOG(ERR,
10111                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
10112                                         ret, hw->aq.asq_last_status);
10113                                 return -ENOSYS;
10114                         }
10115                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10116                         rte_free(mirr_rule);
10117                         pf->nb_mirror_rule--;
10118                         return 0;
10119                 }
10120         } else if (!on) {
10121                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10122                 return -ENOENT;
10123         }
10124
10125         mirr_rule = rte_zmalloc("i40e_mirror_rule",
10126                                 sizeof(struct i40e_mirror_rule), 0);
10127         if (!mirr_rule) {
10128                 PMD_DRV_LOG(ERR, "failed to allocate memory");
10129                 return I40E_ERR_NO_MEMORY;
10130         }
10131         switch (mirror_conf->rule_type) {
10132         case ETH_MIRROR_VLAN:
10133                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10134                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10135                                 mirr_rule->entries[j] =
10136                                         mirror_conf->vlan.vlan_id[i];
10137                                 j++;
10138                         }
10139                 }
10140                 if (j == 0) {
10141                         PMD_DRV_LOG(ERR, "vlan is not specified.");
10142                         rte_free(mirr_rule);
10143                         return -EINVAL;
10144                 }
10145                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10146                 break;
10147         case ETH_MIRROR_VIRTUAL_POOL_UP:
10148         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10149                 /* check if the specified pool bit is out of range */
10150                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
10151                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
10152                         rte_free(mirr_rule);
10153                         return -EINVAL;
10154                 }
10155                 for (i = 0, j = 0; i < pf->vf_num; i++) {
10156                         if (mirror_conf->pool_mask & (1ULL << i)) {
10157                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10158                                 j++;
10159                         }
10160                 }
10161                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10162                         /* add pf vsi to entries */
10163                         mirr_rule->entries[j] = pf->main_vsi_seid;
10164                         j++;
10165                 }
10166                 if (j == 0) {
10167                         PMD_DRV_LOG(ERR, "pool is not specified.");
10168                         rte_free(mirr_rule);
10169                         return -EINVAL;
10170                 }
10171                 /* egress and ingress in aq commands mean from the switch, not the port */
10172                 mirr_rule->rule_type =
10173                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10174                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10175                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10176                 break;
10177         case ETH_MIRROR_UPLINK_PORT:
10178                 /* egress and ingress in aq commands mean from the switch, not the port */
10179                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10180                 break;
10181         case ETH_MIRROR_DOWNLINK_PORT:
10182                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10183                 break;
10184         default:
10185                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10186                         mirror_conf->rule_type);
10187                 rte_free(mirr_rule);
10188                 return -EINVAL;
10189         }
10190
10191         /* If the dst_pool is equal to vf_num, consider it as PF */
10192         if (mirror_conf->dst_pool == pf->vf_num)
10193                 dst_seid = pf->main_vsi_seid;
10194         else
10195                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10196
10197         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10198                                       mirr_rule->rule_type, mirr_rule->entries,
10199                                       j, &rule_id);
10200         if (ret < 0) {
10201                 PMD_DRV_LOG(ERR,
10202                         "failed to add mirror rule: ret = %d, aq_err = %d.",
10203                         ret, hw->aq.asq_last_status);
10204                 rte_free(mirr_rule);
10205                 return -ENOSYS;
10206         }
10207
10208         mirr_rule->index = sw_id;
10209         mirr_rule->num_entries = j;
10210         mirr_rule->id = rule_id;
10211         mirr_rule->dst_vsi_seid = dst_seid;
10212
10213         if (parent)
10214                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10215         else
10216                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10217
10218         pf->nb_mirror_rule++;
10219         return 0;
10220 }
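
/*
 * Illustrative sketch (compiled out, never built): how an application
 * drives the mirror code above through the ethdev API, mirroring VLAN 100
 * traffic to VF pool 0.  The function name and the VLAN/pool values are
 * hypothetical.
 */
#if 0
static int
example_mirror_vlan_to_pool(uint16_t port_id)
{
        struct rte_eth_mirror_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.rule_type = ETH_MIRROR_VLAN;
        conf.dst_pool = 0;              /* first VF pool */
        conf.vlan.vlan_mask = 1ULL;     /* vlan_id[0] is valid */
        conf.vlan.vlan_id[0] = 100;

        /* sw_id 0, enable the rule */
        return rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
}
#endif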
10221
10222 /**
10223  * i40e_mirror_rule_reset
10224  * @dev: pointer to the device
10225  * @sw_id: mirror rule's sw_id
10226  *
10227  * Reset a mirror rule.
10228  *
10229  **/
10230 static int
10231 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10232 {
10233         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10234         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10235         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10236         uint16_t seid;
10237         int ret;
10238
10239         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10240
10241         seid = pf->main_vsi->veb->seid;
10242
10243         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10244                 if (sw_id == it->index) {
10245                         mirr_rule = it;
10246                         break;
10247                 }
10248         }
10249         if (mirr_rule) {
10250                 ret = i40e_aq_del_mirror_rule(hw, seid,
10251                                 mirr_rule->rule_type,
10252                                 mirr_rule->entries,
10253                                 mirr_rule->num_entries, mirr_rule->id);
10254                 if (ret < 0) {
10255                         PMD_DRV_LOG(ERR,
10256                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
10257                                 ret, hw->aq.asq_last_status);
10258                         return -ENOSYS;
10259                 }
10260                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10261                 rte_free(mirr_rule);
10262                 pf->nb_mirror_rule--;
10263         } else {
10264                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10265                 return -ENOENT;
10266         }
10267         return 0;
10268 }
10269
10270 static uint64_t
10271 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10272 {
10273         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10274         uint64_t systim_cycles;
10275
10276         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10277         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10278                         << 32;
10279
10280         return systim_cycles;
10281 }
10282
10283 static uint64_t
10284 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10285 {
10286         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10287         uint64_t rx_tstamp;
10288
10289         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10290         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10291                         << 32;
10292
10293         return rx_tstamp;
10294 }
10295
10296 static uint64_t
10297 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10298 {
10299         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10300         uint64_t tx_tstamp;
10301
10302         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10303         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10304                         << 32;
10305
10306         return tx_tstamp;
10307 }
10308
10309 static void
10310 i40e_start_timecounters(struct rte_eth_dev *dev)
10311 {
10312         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10313         struct i40e_adapter *adapter =
10314                         (struct i40e_adapter *)dev->data->dev_private;
10315         struct rte_eth_link link;
10316         uint32_t tsync_inc_l;
10317         uint32_t tsync_inc_h;
10318
10319         /* Get current link speed. */
10320         memset(&link, 0, sizeof(link));
10321         i40e_dev_link_update(dev, 1);
10322         rte_i40e_dev_atomic_read_link_status(dev, &link);
10323
10324         switch (link.link_speed) {
10325         case ETH_SPEED_NUM_40G:
10326                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10327                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10328                 break;
10329         case ETH_SPEED_NUM_10G:
10330                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10331                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10332                 break;
10333         case ETH_SPEED_NUM_1G:
10334                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10335                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10336                 break;
10337         default:
10338                 tsync_inc_l = 0x0;
10339                 tsync_inc_h = 0x0;
10340         }
10341
10342         /* Set the timesync increment value. */
10343         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10344         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10345
10346         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10347         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10348         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10349
10350         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10351         adapter->systime_tc.cc_shift = 0;
10352         adapter->systime_tc.nsec_mask = 0;
10353
10354         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10355         adapter->rx_tstamp_tc.cc_shift = 0;
10356         adapter->rx_tstamp_tc.nsec_mask = 0;
10357
10358         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10359         adapter->tx_tstamp_tc.cc_shift = 0;
10360         adapter->tx_tstamp_tc.nsec_mask = 0;
10361 }
10362
10363 static int
10364 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10365 {
10366         struct i40e_adapter *adapter =
10367                         (struct i40e_adapter *)dev->data->dev_private;
10368
10369         adapter->systime_tc.nsec += delta;
10370         adapter->rx_tstamp_tc.nsec += delta;
10371         adapter->tx_tstamp_tc.nsec += delta;
10372
10373         return 0;
10374 }
10375
10376 static int
10377 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10378 {
10379         uint64_t ns;
10380         struct i40e_adapter *adapter =
10381                         (struct i40e_adapter *)dev->data->dev_private;
10382
10383         ns = rte_timespec_to_ns(ts);
10384
10385         /* Set the timecounters to a new value. */
10386         adapter->systime_tc.nsec = ns;
10387         adapter->rx_tstamp_tc.nsec = ns;
10388         adapter->tx_tstamp_tc.nsec = ns;
10389
10390         return 0;
10391 }
10392
10393 static int
10394 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10395 {
10396         uint64_t ns, systime_cycles;
10397         struct i40e_adapter *adapter =
10398                         (struct i40e_adapter *)dev->data->dev_private;
10399
10400         systime_cycles = i40e_read_systime_cyclecounter(dev);
10401         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10402         *ts = rte_ns_to_timespec(ns);
10403
10404         return 0;
10405 }
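/*
 * Note: the adjust/write operations above only touch the software
 * timecounters (their nanosecond offsets); the hardware PRTTSYN_TIME
 * counter keeps running and is re-sampled on every read.
 */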
10406
10407 static int
10408 i40e_timesync_enable(struct rte_eth_dev *dev)
10409 {
10410         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10411         uint32_t tsync_ctl_l;
10412         uint32_t tsync_ctl_h;
10413
10414         /* Stop the timesync system time. */
10415         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10416         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10417         /* Reset the timesync system time value. */
10418         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10419         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10420
10421         i40e_start_timecounters(dev);
10422
10423         /* Clear any stale timestamps by reading the latch registers. */
10424         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10425         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10426         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10427         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10428         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10429         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10430
10431         /* Enable timestamping of PTP packets. */
10432         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10433         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10434
10435         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10436         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10437         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10438
10439         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10440         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10441
10442         return 0;
10443 }
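/*
 * Illustrative usage through the generic ethdev API (a sketch, not code
 * from this driver; port_id, ts and mbuf are placeholders):
 *
 *   rte_eth_timesync_enable(port_id);
 *   ...
 *   if (mbuf->ol_flags & PKT_RX_IEEE1588_TMST)
 *           rte_eth_timesync_read_rx_timestamp(port_id, &ts,
 *                                              mbuf->timesync & 0x3);
 */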
10444
10445 static int
10446 i40e_timesync_disable(struct rte_eth_dev *dev)
10447 {
10448         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10449         uint32_t tsync_ctl_l;
10450         uint32_t tsync_ctl_h;
10451
10452         /* Disable timestamping of PTP packets. */
10453         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10454         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10455
10456         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10457         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10458
10459         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10460         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10461
10462         /* Reset the timesync increment value. */
10463         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10464         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10465
10466         return 0;
10467 }
10468
10469 static int
10470 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10471                                 struct timespec *timestamp, uint32_t flags)
10472 {
10473         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10474         struct i40e_adapter *adapter =
10475                 (struct i40e_adapter *)dev->data->dev_private;
10476
10477         uint32_t sync_status;
10478         uint32_t index = flags & 0x03;
10479         uint64_t rx_tstamp_cycles;
10480         uint64_t ns;
10481
10482         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10483         if ((sync_status & (1 << index)) == 0)
10484                 return -EINVAL;
10485
10486         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10487         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10488         *timestamp = rte_ns_to_timespec(ns);
10489
10490         return 0;
10491 }
10492
10493 static int
10494 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10495                                 struct timespec *timestamp)
10496 {
10497         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10498         struct i40e_adapter *adapter =
10499                 (struct i40e_adapter *)dev->data->dev_private;
10500
10501         uint32_t sync_status;
10502         uint64_t tx_tstamp_cycles;
10503         uint64_t ns;
10504
10505         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10506         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10507                 return -EINVAL;
10508
10509         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10510         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10511         *timestamp = rte_ns_to_timespec(ns);
10512
10513         return 0;
10514 }
10515
10516 /*
10517  * i40e_parse_dcb_configure - parse DCB configuration from the user
10518  * @dev: the device being configured
10519  * @dcb_cfg: pointer to the parsed result
10520  * @tc_map: bit map of enabled traffic classes
10521  *
10522  * Returns 0 on success, negative value on failure
10523  */
10524 static int
10525 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10526                          struct i40e_dcbx_config *dcb_cfg,
10527                          uint8_t *tc_map)
10528 {
10529         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10530         uint8_t i, tc_bw, bw_lf;
10531
10532         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10533
10534         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10535         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10536                 PMD_INIT_LOG(ERR, "number of TCs exceeds the maximum.");
10537                 return -EINVAL;
10538         }
10539
10540         /* Each TC gets the same BW; guard nb_tcs == 0 (tc0 only, see below) */
10541         tc_bw = I40E_MAX_PERCENT / RTE_MAX(dcb_rx_conf->nb_tcs, 1);
10542         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10543                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10544         /* distribute the remainder so the sum of tcbw equals 100 */
10545         bw_lf = I40E_MAX_PERCENT % RTE_MAX(dcb_rx_conf->nb_tcs, 1);
10546         for (i = 0; i < bw_lf; i++)
10547                 dcb_cfg->etscfg.tcbwtable[i]++;
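        /*
         * Worked example (illustrative): nb_tcs = 3 gives tc_bw = 100 / 3
         * = 33 and bw_lf = 100 % 3 = 1, so the resulting table is
         * {34, 33, 33}, which sums to 100.
         */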
10548
10549         /* assume each tc has the same Transmission Selection Algorithm */
10550         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10551                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10552
10553         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10554                 dcb_cfg->etscfg.prioritytable[i] =
10555                                 dcb_rx_conf->dcb_tc[i];
10556
10557         /* FW needs one App to configure HW */
10558         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10559         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10560         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10561         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10562
10563         if (dcb_rx_conf->nb_tcs == 0)
10564                 *tc_map = 1; /* tc0 only */
10565         else
10566                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10567
10568         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10569                 dcb_cfg->pfc.willing = 0;
10570                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10571                 dcb_cfg->pfc.pfcenable = *tc_map;
10572         }
10573         return 0;
10574 }
10575
10576
10577 static enum i40e_status_code
10578 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10579                               struct i40e_aqc_vsi_properties_data *info,
10580                               uint8_t enabled_tcmap)
10581 {
10582         enum i40e_status_code ret;
10583         int i, total_tc = 0;
10584         uint16_t qpnum_per_tc, bsf, qp_idx;
10585         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10586         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10587         uint16_t used_queues;
10588
10589         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10590         if (ret != I40E_SUCCESS)
10591                 return ret;
10592
10593         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10594                 if (enabled_tcmap & (1 << i))
10595                         total_tc++;
10596         }
10597         if (total_tc == 0)
10598                 total_tc = 1;
10599         vsi->enabled_tc = enabled_tcmap;
10600
10601         /* Different VSI types have different numbers of queues assigned */
10602         if (vsi->type == I40E_VSI_MAIN)
10603                 used_queues = dev_data->nb_rx_queues -
10604                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10605         else if (vsi->type == I40E_VSI_VMDQ2)
10606                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10607         else {
10608                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10609                 return I40E_ERR_NO_AVAILABLE_VSI;
10610         }
10611
10612         /* Number of queues per enabled TC */
10613         qpnum_per_tc = used_queues / total_tc;
10614         if (qpnum_per_tc == 0) {
10615                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
10616                 return I40E_ERR_INVALID_QP_ID;
10617         }
10618         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10619                                 I40E_MAX_Q_PER_TC);
10620         bsf = rte_bsf32(qpnum_per_tc);
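        /*
         * The admin queue encodes the per-TC queue count as a power-of-two
         * exponent, hence the align-floor above; e.g. 8 queues per TC
         * yields bsf = 3 in the QUE_NUMBER field of the mapping word below.
         */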
10621
10622         /**
10623          * Configure TC and queue mapping parameters. For each enabled TC,
10624          * allocate qpnum_per_tc queues to that traffic class; disabled TCs
10625          * are served by the default queue.
10626          */
10627         qp_idx = 0;
10628         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10629                 if (vsi->enabled_tc & (1 << i)) {
10630                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10631                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10632                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10633                         qp_idx += qpnum_per_tc;
10634                 } else
10635                         info->tc_mapping[i] = 0;
10636         }
10637
10638         /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
10639         if (vsi->type == I40E_VSI_SRIOV) {
10640                 info->mapping_flags |=
10641                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10642                 for (i = 0; i < vsi->nb_qps; i++)
10643                         info->queue_mapping[i] =
10644                                 rte_cpu_to_le_16(vsi->base_queue + i);
10645         } else {
10646                 info->mapping_flags |=
10647                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10648                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10649         }
10650         info->valid_sections |=
10651                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10652
10653         return I40E_SUCCESS;
10654 }
10655
10656 /*
10657  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10658  * @veb: VEB to be configured
10659  * @tc_map: enabled TC bitmap
10660  *
10661  * Returns 0 on success, negative value on failure
10662  */
10663 static enum i40e_status_code
10664 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10665 {
10666         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10667         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10668         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10669         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10670         enum i40e_status_code ret = I40E_SUCCESS;
10671         int i;
10672         uint32_t bw_max;
10673
10674         /* Check if enabled_tc is same as existing or new TCs */
10675         if (veb->enabled_tc == tc_map)
10676                 return ret;
10677
10678         /* configure tc bandwidth */
10679         memset(&veb_bw, 0, sizeof(veb_bw));
10680         veb_bw.tc_valid_bits = tc_map;
10681         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10682         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10683                 if (tc_map & BIT_ULL(i))
10684                         veb_bw.tc_bw_share_credits[i] = 1;
10685         }
10686         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10687                                                    &veb_bw, NULL);
10688         if (ret) {
10689                 PMD_INIT_LOG(ERR,
10690                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10691                         hw->aq.asq_last_status);
10692                 return ret;
10693         }
10694
10695         memset(&ets_query, 0, sizeof(ets_query));
10696         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10697                                                    &ets_query, NULL);
10698         if (ret != I40E_SUCCESS) {
10699                 PMD_DRV_LOG(ERR,
10700                         "Failed to get switch_comp ETS configuration %u",
10701                         hw->aq.asq_last_status);
10702                 return ret;
10703         }
10704         memset(&bw_query, 0, sizeof(bw_query));
10705         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10706                                                   &bw_query, NULL);
10707         if (ret != I40E_SUCCESS) {
10708                 PMD_DRV_LOG(ERR,
10709                         "Failed to get switch_comp bandwidth configuration %u",
10710                         hw->aq.asq_last_status);
10711                 return ret;
10712         }
10713
10714         /* store and print out BW info */
10715         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10716         veb->bw_info.bw_max = ets_query.tc_bw_max;
10717         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10718         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10719         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10720                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10721                      I40E_16_BIT_WIDTH);
10722         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10723                 veb->bw_info.bw_ets_share_credits[i] =
10724                                 bw_query.tc_bw_share_credits[i];
10725                 veb->bw_info.bw_ets_credits[i] =
10726                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10727                 /* 4 bits per TC, 4th bit is reserved */
10728                 veb->bw_info.bw_ets_max[i] =
10729                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10730                                   RTE_LEN2MASK(3, uint8_t));
10731                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10732                             veb->bw_info.bw_ets_share_credits[i]);
10733                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10734                             veb->bw_info.bw_ets_credits[i]);
10735                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10736                             veb->bw_info.bw_ets_max[i]);
10737         }
10738
10739         veb->enabled_tc = tc_map;
10740
10741         return ret;
10742 }
10743
10744
10745 /*
10746  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10747  * @vsi: VSI to be configured
10748  * @tc_map: enabled TC bitmap
10749  *
10750  * Returns 0 on success, negative value on failure
10751  */
10752 static enum i40e_status_code
10753 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10754 {
10755         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10756         struct i40e_vsi_context ctxt;
10757         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10758         enum i40e_status_code ret = I40E_SUCCESS;
10759         int i;
10760
10761         /* Check if enabled_tc is same as existing or new TCs */
10762         if (vsi->enabled_tc == tc_map)
10763                 return ret;
10764
10765         /* configure tc bandwidth */
10766         memset(&bw_data, 0, sizeof(bw_data));
10767         bw_data.tc_valid_bits = tc_map;
10768         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10769         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10770                 if (tc_map & BIT_ULL(i))
10771                         bw_data.tc_bw_credits[i] = 1;
10772         }
10773         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10774         if (ret) {
10775                 PMD_INIT_LOG(ERR,
10776                         "AQ command Config VSI BW allocation per TC failed = %d",
10777                         hw->aq.asq_last_status);
10778                 goto out;
10779         }
10780         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10781                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10782
10783         /* Update Queue Pairs Mapping for currently enabled UPs */
10784         ctxt.seid = vsi->seid;
10785         ctxt.pf_num = hw->pf_id;
10786         ctxt.vf_num = 0;
10787         ctxt.uplink_seid = vsi->uplink_seid;
10788         ctxt.info = vsi->info;
10789         i40e_get_cap(hw);
10790         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10791         if (ret)
10792                 goto out;
10793
10794         /* Update the VSI after updating the VSI queue-mapping information */
10795         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10796         if (ret) {
10797                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10798                         hw->aq.asq_last_status);
10799                 goto out;
10800         }
10801         /* update the local VSI info with updated queue map */
10802         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10803                                         sizeof(vsi->info.tc_mapping));
10804         rte_memcpy(&vsi->info.queue_mapping,
10805                         &ctxt.info.queue_mapping,
10806                 sizeof(vsi->info.queue_mapping));
10807         vsi->info.mapping_flags = ctxt.info.mapping_flags;
10808         vsi->info.valid_sections = 0;
10809
10810         /* query and update current VSI BW information */
10811         ret = i40e_vsi_get_bw_config(vsi);
10812         if (ret) {
10813                 PMD_INIT_LOG(ERR,
10814                          "Failed updating vsi bw info, err %s aq_err %s",
10815                          i40e_stat_str(hw, ret),
10816                          i40e_aq_str(hw, hw->aq.asq_last_status));
10817                 goto out;
10818         }
10819
10820         vsi->enabled_tc = tc_map;
10821
10822 out:
10823         return ret;
10824 }
10825
10826 /*
10827  * i40e_dcb_hw_configure - program the dcb setting to hw
10828  * @pf: pf the configuration is taken on
10829  * @new_cfg: new configuration
10830  * @tc_map: enabled TC bitmap
10831  *
10832  * Returns 0 on success, negative value on failure
10833  */
10834 static enum i40e_status_code
10835 i40e_dcb_hw_configure(struct i40e_pf *pf,
10836                       struct i40e_dcbx_config *new_cfg,
10837                       uint8_t tc_map)
10838 {
10839         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10840         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
10841         struct i40e_vsi *main_vsi = pf->main_vsi;
10842         struct i40e_vsi_list *vsi_list;
10843         enum i40e_status_code ret;
10844         int i;
10845         uint32_t val;
10846
10847         /* Use the FW LLDP API only if FW >= v4.4 */
10848         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
10849               (hw->aq.fw_maj_ver >= 5))) {
10850                 PMD_INIT_LOG(ERR,
10851                         "FW < v4.4, can not use FW LLDP API to configure DCB");
10852                 return I40E_ERR_FIRMWARE_API_VERSION;
10853         }
10854
10855         /* Check whether reconfiguration is needed */
10856         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
10857                 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
10858                 return I40E_SUCCESS;
10859         }
10860
10861         /* Copy the new config to the current config */
10862         *old_cfg = *new_cfg;
10863         old_cfg->etsrec = old_cfg->etscfg;
10864         ret = i40e_set_dcb_config(hw);
10865         if (ret) {
10866                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
10867                          i40e_stat_str(hw, ret),
10868                          i40e_aq_str(hw, hw->aq.asq_last_status));
10869                 return ret;
10870         }
10871         /* set receive Arbiter to RR mode and ETS scheme by default */
10872         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
10873                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
10874                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
10875                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
10876                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
10877                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
10878                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
10879                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
10880                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
10881                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
10882                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
10883                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
10884                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
10885         }
10886         /* get local mib to check whether it is configured correctly */
10887         /* IEEE mode */
10888         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
10889         /* Get Local DCB Config */
10890         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
10891                                      &hw->local_dcbx_config);
10892
10893         /* If a VEB has been created, its TC setting needs updating first */
10894         if (main_vsi->veb) {
10895                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
10896                 if (ret)
10897                         PMD_INIT_LOG(WARNING,
10898                                  "Failed configuring TC for VEB seid=%d",
10899                                  main_vsi->veb->seid);
10900         }
10901         /* Update each VSI */
10902         i40e_vsi_config_tc(main_vsi, tc_map);
10903         if (main_vsi->veb) {
10904                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
10905                         /* Besides the main VSI and VMDQ VSIs, only the
10906                          * default TC is enabled for other VSIs
10907                          */
10908                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
10909                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10910                                                          tc_map);
10911                         else
10912                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10913                                                          I40E_DEFAULT_TCMAP);
10914                         if (ret)
10915                                 PMD_INIT_LOG(WARNING,
10916                                         "Failed configuring TC for VSI seid=%d",
10917                                         vsi_list->vsi->seid);
10918                         /* continue */
10919                 }
10920         }
10921         return I40E_SUCCESS;
10922 }
10923
10924 /*
10925  * i40e_dcb_init_configure - initial dcb config
10926  * @dev: device being configured
10927  * @sw_dcb: indicate whether dcb is sw configured or hw offload
10928  *
10929  * Returns 0 on success, negative value on failure
10930  */
10931 int
10932 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
10933 {
10934         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10935         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10936         int i, ret = 0;
10937
10938         if ((pf->flags & I40E_FLAG_DCB) == 0) {
10939                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10940                 return -ENOTSUP;
10941         }
10942
10943         /* DCB initialization:
10944          * Update DCB configuration from the Firmware and configure
10945          * LLDP MIB change event.
10946          */
10947         if (sw_dcb == TRUE) {
10948                 ret = i40e_init_dcb(hw);
10949                 /* If the LLDP agent is stopped, i40e_init_dcb is expected
10950                  * to fail with I40E_AQ_RC_EPERM as the adminq status.
10951                  * Otherwise, it should return success.
10952                  */
10953                 if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
10954                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
10955                         memset(&hw->local_dcbx_config, 0,
10956                                 sizeof(struct i40e_dcbx_config));
10957                         /* set dcb default configuration */
10958                         hw->local_dcbx_config.etscfg.willing = 0;
10959                         hw->local_dcbx_config.etscfg.maxtcs = 0;
10960                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
10961                         hw->local_dcbx_config.etscfg.tsatable[0] =
10962                                                 I40E_IEEE_TSA_ETS;
10963                         /* all UPs mapping to TC0 */
10964                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10965                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
10966                         hw->local_dcbx_config.etsrec =
10967                                 hw->local_dcbx_config.etscfg;
10968                         hw->local_dcbx_config.pfc.willing = 0;
10969                         hw->local_dcbx_config.pfc.pfccap =
10970                                                 I40E_MAX_TRAFFIC_CLASS;
10971                         /* FW needs one App to configure HW */
10972                         hw->local_dcbx_config.numapps = 1;
10973                         hw->local_dcbx_config.app[0].selector =
10974                                                 I40E_APP_SEL_ETHTYPE;
10975                         hw->local_dcbx_config.app[0].priority = 3;
10976                         hw->local_dcbx_config.app[0].protocolid =
10977                                                 I40E_APP_PROTOID_FCOE;
10978                         ret = i40e_set_dcb_config(hw);
10979                         if (ret) {
10980                                 PMD_INIT_LOG(ERR,
10981                                         "default dcb config fails. err = %d, aq_err = %d.",
10982                                         ret, hw->aq.asq_last_status);
10983                                 return -ENOSYS;
10984                         }
10985                 } else {
10986                         PMD_INIT_LOG(ERR,
10987                                 "DCB initialization in FW fails, err = %d, aq_err = %d.",
10988                                 ret, hw->aq.asq_last_status);
10989                         return -ENOTSUP;
10990                 }
10991         } else {
10992                 ret = i40e_aq_start_lldp(hw, NULL);
10993                 if (ret != I40E_SUCCESS)
10994                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
10995
10996                 ret = i40e_init_dcb(hw);
10997                 if (!ret) {
10998                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
10999                                 PMD_INIT_LOG(ERR,
11000                                         "HW doesn't support DCBX offload.");
11001                                 return -ENOTSUP;
11002                         }
11003                 } else {
11004                         PMD_INIT_LOG(ERR,
11005                                 "DCBX configuration failed, err = %d, aq_err = %d.",
11006                                 ret, hw->aq.asq_last_status);
11007                         return -ENOTSUP;
11008                 }
11009         }
11010         return 0;
11011 }
11012
11013 /*
11014  * i40e_dcb_setup - setup dcb related config
11015  * @dev: device being configured
11016  *
11017  * Returns 0 on success, negative value on failure
11018  */
11019 static int
11020 i40e_dcb_setup(struct rte_eth_dev *dev)
11021 {
11022         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11023         struct i40e_dcbx_config dcb_cfg;
11024         uint8_t tc_map = 0;
11025         int ret = 0;
11026
11027         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11028                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11029                 return -ENOTSUP;
11030         }
11031
11032         if (pf->vf_num != 0)
11033                 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
11034
11035         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11036         if (ret) {
11037                 PMD_INIT_LOG(ERR, "invalid dcb config");
11038                 return -EINVAL;
11039         }
11040         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11041         if (ret) {
11042                 PMD_INIT_LOG(ERR, "dcb sw configure fails");
11043                 return -ENOSYS;
11044         }
11045
11046         return 0;
11047 }
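/*
 * Illustrative application-side configuration consumed by the DCB path
 * above (a sketch, not code from this driver; port_id and the queue
 * counts are placeholders):
 *
 *   struct rte_eth_conf conf = {0};
 *
 *   conf.rxmode.mq_mode = ETH_MQ_RX_DCB;
 *   conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
 *   for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
 *           conf.rx_adv_conf.dcb_rx_conf.dcb_tc[i] = i % 4;   (UP -> TC)
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */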
11048
11049 static int
11050 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11051                       struct rte_eth_dcb_info *dcb_info)
11052 {
11053         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11054         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11055         struct i40e_vsi *vsi = pf->main_vsi;
11056         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11057         uint16_t bsf, tc_mapping;
11058         int i, j = 0;
11059
11060         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11061                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11062         else
11063                 dcb_info->nb_tcs = 1;
11064         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11065                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11066         for (i = 0; i < dcb_info->nb_tcs; i++)
11067                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11068
11069         /* get queue mapping if vmdq is disabled */
11070         if (!pf->nb_cfg_vmdq_vsi) {
11071                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11072                         if (!(vsi->enabled_tc & (1 << i)))
11073                                 continue;
11074                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11075                         dcb_info->tc_queue.tc_rxq[j][i].base =
11076                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11077                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11078                         dcb_info->tc_queue.tc_txq[j][i].base =
11079                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11080                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11081                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11082                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11083                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11084                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11085                 }
11086                 return 0;
11087         }
11088
11089         /* get queue mapping if vmdq is enabled */
11090         do {
11091                 vsi = pf->vmdq[j].vsi;
11092                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11093                         if (!(vsi->enabled_tc & (1 << i)))
11094                                 continue;
11095                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11096                         dcb_info->tc_queue.tc_rxq[j][i].base =
11097                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11098                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11099                         dcb_info->tc_queue.tc_txq[j][i].base =
11100                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11101                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11102                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11103                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11104                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11105                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11106                 }
11107                 j++;
11108         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11109         return 0;
11110 }
11111
11112 static int
11113 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11114 {
11115         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11116         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11117         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11118         uint16_t msix_intr;
11119
11120         msix_intr = intr_handle->intr_vec[queue_id];
11121         if (msix_intr == I40E_MISC_VEC_ID)
11122                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11123                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
11124                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11125                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11126         else
11127                 I40E_WRITE_REG(hw,
11128                                I40E_PFINT_DYN_CTLN(msix_intr -
11129                                                    I40E_RX_VEC_START),
11130                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
11131                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11132                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11133
11134         I40E_WRITE_FLUSH(hw);
11135         rte_intr_enable(&pci_dev->intr_handle);
11136
11137         return 0;
11138 }
11139
11140 static int
11141 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11142 {
11143         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11144         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11145         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11146         uint16_t msix_intr;
11147
11148         msix_intr = intr_handle->intr_vec[queue_id];
11149         if (msix_intr == I40E_MISC_VEC_ID)
11150                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11151                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11152         else
11153                 I40E_WRITE_REG(hw,
11154                                I40E_PFINT_DYN_CTLN(msix_intr -
11155                                                    I40E_RX_VEC_START),
11156                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11157         I40E_WRITE_FLUSH(hw);
11158
11159         return 0;
11160 }
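/*
 * Note on the two helpers above: enabling writes INTENA | CLEARPBA so the
 * vector can fire again and any pending bit is cleared, while disabling
 * rewrites the register with INTENA clear. In both cases the ITR_INDX
 * field is set to 3 ("no ITR"), so these queue interrupts are not bound
 * to an interrupt throttling interval here.
 */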
11161
11162 static int i40e_get_regs(struct rte_eth_dev *dev,
11163                          struct rte_dev_reg_info *regs)
11164 {
11165         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11166         uint32_t *ptr_data = regs->data;
11167         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11168         const struct i40e_reg_info *reg_info;
11169
11170         if (ptr_data == NULL) {
11171                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11172                 regs->width = sizeof(uint32_t);
11173                 return 0;
11174         }
11175
11176         /* The first few registers have to be read using AQ operations */
11177         reg_idx = 0;
11178         while (i40e_regs_adminq[reg_idx].name) {
11179                 reg_info = &i40e_regs_adminq[reg_idx++];
11180                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11181                         for (arr_idx2 = 0;
11182                                         arr_idx2 <= reg_info->count2;
11183                                         arr_idx2++) {
11184                                 reg_offset = arr_idx * reg_info->stride1 +
11185                                         arr_idx2 * reg_info->stride2;
11186                                 reg_offset += reg_info->base_addr;
11187                                 ptr_data[reg_offset >> 2] =
11188                                         i40e_read_rx_ctl(hw, reg_offset);
11189                         }
11190         }
11191
11192         /* The remaining registers can be read using primitives */
11193         reg_idx = 0;
11194         while (i40e_regs_others[reg_idx].name) {
11195                 reg_info = &i40e_regs_others[reg_idx++];
11196                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11197                         for (arr_idx2 = 0;
11198                                         arr_idx2 <= reg_info->count2;
11199                                         arr_idx2++) {
11200                                 reg_offset = arr_idx * reg_info->stride1 +
11201                                         arr_idx2 * reg_info->stride2;
11202                                 reg_offset += reg_info->base_addr;
11203                                 ptr_data[reg_offset >> 2] =
11204                                         I40E_READ_REG(hw, reg_offset);
11205                         }
11206         }
11207
11208         return 0;
11209 }
11210
11211 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11212 {
11213         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11214
11215         /* Convert word count to byte count */
11216         return hw->nvm.sr_size << 1;
11217 }
11218
11219 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11220                            struct rte_dev_eeprom_info *eeprom)
11221 {
11222         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11223         uint16_t *data = eeprom->data;
11224         uint16_t offset, length, cnt_words;
11225         int ret_code;
11226
11227         offset = eeprom->offset >> 1;
11228         length = eeprom->length >> 1;
11229         cnt_words = length;
11230
11231         if (offset > hw->nvm.sr_size ||
11232                 offset + length > hw->nvm.sr_size) {
11233                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11234                 return -EINVAL;
11235         }
11236
11237         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11238
11239         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11240         if (ret_code != I40E_SUCCESS || cnt_words != length) {
11241                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
11242                 return -EIO;
11243         }
11244
11245         return 0;
11246 }
11247
11248 static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11249                                       struct ether_addr *mac_addr)
11250 {
11251         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11252         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11253         struct i40e_vsi *vsi = pf->main_vsi;
11254         struct i40e_mac_filter_info mac_filter;
11255         struct i40e_mac_filter *f;
11256         int ret;
11257
11258         if (!is_valid_assigned_ether_addr(mac_addr)) {
11259                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11260                 return;
11261         }
11262
11263         TAILQ_FOREACH(f, &vsi->mac_list, next) {
11264                 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
11265                         break;
11266         }
11267
11268         if (f == NULL) {
11269                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11270                 return;
11271         }
11272
11273         mac_filter = f->mac_info;
11274         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11275         if (ret != I40E_SUCCESS) {
11276                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11277                 return;
11278         }
11279         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11280         ret = i40e_vsi_add_mac(vsi, &mac_filter);
11281         if (ret != I40E_SUCCESS) {
11282                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
11283                 return;
11284         }
11285         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11286
11287         i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11288                                   mac_addr->addr_bytes, NULL);
11289 }
11290
11291 static int
11292 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11293 {
11294         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11295         struct rte_eth_dev_data *dev_data = pf->dev_data;
11296         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11297         int ret = 0;
11298
11299         /* check if mtu is within the allowed range */
11300         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
11301                 return -EINVAL;
11302
11303         /* MTU setting is forbidden while the port is started */
11304         if (dev_data->dev_started) {
11305                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11306                             dev_data->port_id);
11307                 return -EBUSY;
11308         }
11309
11310         if (frame_size > ETHER_MAX_LEN)
11311                 dev_data->dev_conf.rxmode.jumbo_frame = 1;
11312         else
11313                 dev_data->dev_conf.rxmode.jumbo_frame = 0;
11314
11315         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11316
11317         return ret;
11318 }
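/*
 * Worked example (assuming I40E_ETH_OVERHEAD counts the Ethernet header,
 * CRC and two VLAN tags, i.e. 26 bytes): mtu = 1500 gives frame_size =
 * 1526, which is above ETHER_MAX_LEN (1518), so jumbo_frame is enabled
 * even for the standard MTU.
 */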
11319
11320 /* Restore ethertype filter */
11321 static void
11322 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11323 {
11324         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11325         struct i40e_ethertype_filter_list
11326                 *ethertype_list = &pf->ethertype.ethertype_list;
11327         struct i40e_ethertype_filter *f;
11328         struct i40e_control_filter_stats stats = {0};
11329         uint16_t flags;
11330
11331         TAILQ_FOREACH(f, ethertype_list, rules) {
11332                 flags = 0;
11333                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11334                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11335                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11336                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11337                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11338
11339                 memset(&stats, 0, sizeof(stats));
11340                 i40e_aq_add_rem_control_packet_filter(hw,
11341                                             f->input.mac_addr.addr_bytes,
11342                                             f->input.ether_type,
11343                                             flags, pf->main_vsi->seid,
11344                                             f->queue, 1, &stats, NULL);
11345         }
11346         PMD_DRV_LOG(INFO, "Ethertype filter:"
11347                     " mac_etype_used = %u, etype_used = %u,"
11348                     " mac_etype_free = %u, etype_free = %u",
11349                     stats.mac_etype_used, stats.etype_used,
11350                     stats.mac_etype_free, stats.etype_free);
11351 }
11352
11353 /* Restore tunnel filter */
11354 static void
11355 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11356 {
11357         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11358         struct i40e_vsi *vsi;
11359         struct i40e_pf_vf *vf;
11360         struct i40e_tunnel_filter_list
11361                 *tunnel_list = &pf->tunnel.tunnel_list;
11362         struct i40e_tunnel_filter *f;
11363         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
11364         bool big_buffer = 0;
11365
11366         TAILQ_FOREACH(f, tunnel_list, rules) {
11367                 if (!f->is_to_vf)
11368                         vsi = pf->main_vsi;
11369                 else {
11370                         vf = &pf->vfs[f->vf_id];
11371                         vsi = vf->vsi;
11372                 }
11373                 memset(&cld_filter, 0, sizeof(cld_filter));
11374                 ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
11375                         (struct ether_addr *)&cld_filter.element.outer_mac);
11376                 ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
11377                         (struct ether_addr *)&cld_filter.element.inner_mac);
11378                 cld_filter.element.inner_vlan = f->input.inner_vlan;
11379                 cld_filter.element.flags = f->input.flags;
11380                 cld_filter.element.tenant_id = f->input.tenant_id;
11381                 cld_filter.element.queue_number = f->queue;
11382                 rte_memcpy(cld_filter.general_fields,
11383                            f->input.general_fields,
11384                            sizeof(f->input.general_fields));
11385
11386                 if (((f->input.flags &
11387                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11388                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11389                     ((f->input.flags &
11390                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11391                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11392                     ((f->input.flags &
11393                      I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11394                      I40E_AQC_ADD_CLOUD_FILTER_0X10))
11395                         big_buffer = 1;
11396
11397                 if (big_buffer)
11398                         i40e_aq_add_cloud_filters_big_buffer(hw,
11399                                              vsi->seid, &cld_filter, 1);
11400                 else
11401                         i40e_aq_add_cloud_filters(hw, vsi->seid,
11402                                                   &cld_filter.element, 1);
11403         }
11404 }
11405
11406 /* Restore rss filter */
11407 static inline void
11408 i40e_rss_filter_restore(struct i40e_pf *pf)
11409 {
11410         struct i40e_rte_flow_rss_conf *conf = &pf->rss_info;
11411
11412         if (conf->num)
11413                 i40e_config_rss_filter(pf, conf, TRUE);
11414 }
11415
11416 static void
11417 i40e_filter_restore(struct i40e_pf *pf)
11418 {
11419         i40e_ethertype_filter_restore(pf);
11420         i40e_tunnel_filter_restore(pf);
11421         i40e_fdir_filter_restore(pf);
11422         i40e_rss_filter_restore(pf);
11423 }
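/*
 * i40e_filter_restore() is called from the device start path: the PMD
 * keeps ethertype/tunnel/fdir/RSS rules in software lists so they can be
 * re-programmed after a stop/start or reset has cleared the hardware
 * filter tables.
 */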
11424
11425 static bool
11426 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11427 {
11428         if (strcmp(dev->device->driver->name, drv->driver.name))
11429                 return false;
11430
11431         return true;
11432 }
11433
11434 bool
11435 is_i40e_supported(struct rte_eth_dev *dev)
11436 {
11437         return is_device_supported(dev, &rte_i40e_pmd);
11438 }
11439
11440 struct i40e_customized_pctype*
11441 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11442 {
11443         int i;
11444
11445         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11446                 if (pf->customized_pctype[i].index == index)
11447                         return &pf->customized_pctype[i];
11448         }
11449         return NULL;
11450 }
11451
11452 static int
11453 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11454                               uint32_t pkg_size, uint32_t proto_num,
11455                               struct rte_pmd_i40e_proto_info *proto)
11456 {
11457         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11458         uint32_t pctype_num;
11459         struct rte_pmd_i40e_ptype_info *pctype;
11460         uint32_t buff_size;
11461         struct i40e_customized_pctype *new_pctype = NULL;
11462         uint8_t proto_id;
11463         uint8_t pctype_value;
11464         char name[64];
11465         uint32_t i, j, n;
11466         int ret;
11467
11468         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11469                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
11470                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11471         if (ret) {
11472                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
11473                 return -1;
11474         }
11475         if (!pctype_num) {
11476                 PMD_DRV_LOG(INFO, "No new pctype added");
11477                 return -1;
11478         }
11479
11480         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11481         pctype = rte_zmalloc("new_pctype", buff_size, 0);
11482         if (!pctype) {
11483                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11484                 return -1;
11485         }
11486         /* get information about new pctype list */
11487         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11488                                         (uint8_t *)pctype, buff_size,
11489                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11490         if (ret) {
11491                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
11492                 rte_free(pctype);
11493                 return -1;
11494         }
11495
11496         /* Update customized pctype. */
11497         for (i = 0; i < pctype_num; i++) {
11498                 pctype_value = pctype[i].ptype_id;
11499                 memset(name, 0, sizeof(name));
11500                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11501                         proto_id = pctype[i].protocols[j];
11502                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11503                                 continue;
11504                         for (n = 0; n < proto_num; n++) {
11505                                 if (proto[n].proto_id != proto_id)
11506                                         continue;
11507                                 strcat(name, proto[n].name);
11508                                 strcat(name, "_");
11509                                 break;
11510                         }
11511                 }
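                /*
                 * name now holds one "_"-joined segment per matched
                 * protocol, e.g. "GTPU_IPV4_"; the trailing '_' is
                 * stripped below before matching the known customized
                 * pctype names.
                 */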
11512                 if (name[0]) name[strlen(name) - 1] = '\0';
11513                 if (!strcmp(name, "GTPC"))
11514                         new_pctype =
11515                                 i40e_find_customized_pctype(pf,
11516                                                       I40E_CUSTOMIZED_GTPC);
11517                 else if (!strcmp(name, "GTPU_IPV4"))
11518                         new_pctype =
11519                                 i40e_find_customized_pctype(pf,
11520                                                    I40E_CUSTOMIZED_GTPU_IPV4);
11521                 else if (!strcmp(name, "GTPU_IPV6"))
11522                         new_pctype =
11523                                 i40e_find_customized_pctype(pf,
11524                                                    I40E_CUSTOMIZED_GTPU_IPV6);
11525                 else if (!strcmp(name, "GTPU"))
11526                         new_pctype =
11527                                 i40e_find_customized_pctype(pf,
11528                                                       I40E_CUSTOMIZED_GTPU);
11529                 if (new_pctype) {
11530                         new_pctype->pctype = pctype_value;
11531                         new_pctype->valid = true;
11532                 }
11533         }
11534
11535         rte_free(pctype);
11536         return 0;
11537 }
11538
11539 static int
11540 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
11541                                uint32_t pkg_size, uint32_t proto_num,
11542                                struct rte_pmd_i40e_proto_info *proto)
11543 {
11544         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
11545         uint16_t port_id = dev->data->port_id;
11546         uint32_t ptype_num;
11547         struct rte_pmd_i40e_ptype_info *ptype;
11548         uint32_t buff_size;
11549         uint8_t proto_id;
11550         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
11551         uint32_t i, j, n;
11552         bool in_tunnel;
11553         int ret;
11554
11555         /* get information about new ptype num */
11556         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11557                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
11558                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
11559         if (ret) {
11560                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
11561                 return ret;
11562         }
11563         if (!ptype_num) {
11564                 PMD_DRV_LOG(INFO, "No new ptype added");
11565                 return -1;
11566         }
11567
11568         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11569         ptype = rte_zmalloc("new_ptype", buff_size, 0);
11570         if (!ptype) {
11571                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11572                 return -1;
11573         }
11574
11575         /* get information about new ptype list */
11576         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11577                                         (uint8_t *)ptype, buff_size,
11578                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
11579         if (ret) {
11580                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
11581                 rte_free(ptype);
11582                 return ret;
11583         }
11584
11585         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
11586         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
11587         if (!ptype_mapping) {
11588                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11589                 rte_free(ptype);
11590                 return -1;
11591         }
11592
11593         /* Update ptype mapping table. */
11594         for (i = 0; i < ptype_num; i++) {
11595                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
11596                 ptype_mapping[i].sw_ptype = 0;
11597                 in_tunnel = false;
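                /*
                 * Segments are scanned outermost-first: an outer IP header
                 * ("OIPV4"/"OIPV6") flips in_tunnel, after which IP/UDP/TCP
                 * segments map to the RTE_PTYPE_INNER_* variants instead of
                 * the outer ones.
                 */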
11598                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11599                         proto_id = ptype[i].protocols[j];
11600                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11601                                 continue;
11602                         for (n = 0; n < proto_num; n++) {
11603                                 if (proto[n].proto_id != proto_id)
11604                                         continue;
11605                                 memset(name, 0, sizeof(name));
11606                                 strcpy(name, proto[n].name);
                                if (!strncasecmp(name, "PPPOE", 5))
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L2_ETHER_PPPOE;
                                else if (!strncasecmp(name, "IPV4FRAG", 8) &&
                                         !in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_FRAG;
                                } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
                                           in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_FRAG;
                                } else if (!strncasecmp(name, "OIPV4", 5)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "IPV4", 4) &&
                                           !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                                else if (!strncasecmp(name, "IPV4", 4) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
                                else if (!strncasecmp(name, "IPV6FRAG", 8) &&
                                         !in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_FRAG;
                                } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
                                           in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_FRAG;
                                } else if (!strncasecmp(name, "OIPV6", 5)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "IPV6", 4) &&
                                           !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
                                else if (!strncasecmp(name, "IPV6", 4) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
                                else if (!strncasecmp(name, "UDP", 3) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_UDP;
                                else if (!strncasecmp(name, "UDP", 3) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_UDP;
                                else if (!strncasecmp(name, "TCP", 3) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_TCP;
                                else if (!strncasecmp(name, "TCP", 3) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_TCP;
                                else if (!strncasecmp(name, "SCTP", 4) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_SCTP;
                                else if (!strncasecmp(name, "SCTP", 4) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_SCTP;
                                else if ((!strncasecmp(name, "ICMP", 4) ||
                                          !strncasecmp(name, "ICMPV6", 6)) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_ICMP;
                                else if ((!strncasecmp(name, "ICMP", 4) ||
                                          !strncasecmp(name, "ICMPV6", 6)) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_ICMP;
                                else if (!strncasecmp(name, "GTPC", 4)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_GTPC;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "GTPU", 4)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_GTPU;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "GRENAT", 6)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_GRENAT;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "L2TPV2CTL", 9)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_L2TP;
                                        in_tunnel = true;
                                }

                                break;
                        }
                }
        }

        ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
                                                ptype_num, 0);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to update mapping table.");

        rte_free(ptype_mapping);
        rte_free(ptype);
        return ret;
}
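
/*
 * Illustrative sketch (an addition for documentation only, not called by
 * the driver): after a DDP package update, an application could read back
 * the mapping programmed by the function above through the rte_pmd_i40e
 * API. The 256-entry table size is an assumption for the example.
 */
static __rte_unused void
i40e_example_dump_ptype_mapping(uint16_t port_id)
{
        struct rte_pmd_i40e_ptype_mapping mapping[256];
        uint16_t count = 0;
        uint16_t i;
        int ret;

        /* Fetch only entries that carry a valid software ptype */
        ret = rte_pmd_i40e_ptype_mapping_get(port_id, mapping,
                                             RTE_DIM(mapping), &count, 1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get ptype mapping");
                return;
        }
        for (i = 0; i < count; i++)
                PMD_DRV_LOG(INFO, "hw ptype %u -> sw ptype 0x%x",
                            mapping[i].hw_ptype, mapping[i].sw_ptype);
}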

void
i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
                              uint32_t pkg_size)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        uint32_t proto_num;
        struct rte_pmd_i40e_proto_info *proto;
        uint32_t buff_size;
        uint32_t i;
        int ret;

        /* get information about protocol number */
        ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
                                       (uint8_t *)&proto_num, sizeof(proto_num),
                                       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get protocol number");
                return;
        }
        if (!proto_num) {
                PMD_DRV_LOG(INFO, "No new protocol added");
                return;
        }

        buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
        proto = rte_zmalloc("new_proto", buff_size, 0);
        if (!proto) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
                return;
        }

        /* get information about protocol list */
        ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
                                        (uint8_t *)proto, buff_size,
                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get protocol list");
                rte_free(proto);
                return;
        }

        /* Check if GTP is supported. */
        for (i = 0; i < proto_num; i++) {
                if (!strncmp(proto[i].name, "GTP", 3)) {
                        pf->gtp_support = true;
                        break;
                }
        }

        /* Update customized pctype info */
        ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
                                            proto_num, proto);
        if (ret)
                PMD_DRV_LOG(INFO, "No pctype is updated.");

        /* Update customized ptype info */
        ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
                                           proto_num, proto);
        if (ret)
                PMD_DRV_LOG(INFO, "No ptype is updated.");

        rte_free(proto);
}
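
/*
 * Illustrative sketch (not called by the driver): the update path above is
 * reached when an application writes a DDP profile to the NIC. Loading the
 * package image from disk is omitted here; buff/size are assumed to hold
 * the raw profile contents.
 */
static __rte_unused int
i40e_example_load_ddp_package(uint16_t port_id, uint8_t *buff, uint32_t size)
{
        int ret;

        ret = rte_pmd_i40e_process_ddp_package(port_id, buff, size,
                                               RTE_PMD_I40E_PKG_OP_WR_ADD);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to load DDP package");
        return ret;
}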

/* Create a QinQ cloud filter
 *
 * The Fortville NIC has limited resources for tunnel filters,
 * so we can only reuse existing filters.
 *
 * In step 1 we define which Field Vector fields can be used for
 * filter types.
 * As we do not have the inner tag defined as a field,
 * we have to define it first, by reusing one of the L1 entries.
 *
 * In step 2 we are replacing one of the existing filter types with
 * a new one for QinQ.
 * As we are reusing L1 and replacing L2, some of the default filter
 * types will disappear, depending on which L1 and L2 entries we reuse.
 *
 * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
 *
 * 1.   Create L1 filter of outer vlan (12b) which will be in use
 *              later when we define the cloud filter.
 *      a.      Valid_flags.replace_cloud = 0
 *      b.      Old_filter = 10 (Stag_Inner_Vlan)
 *      c.      New_filter = 0x10
 *      d.      TR bit = 0xff (optional, not used here)
 *      e.      Buffer – 2 entries:
 *              i.      Byte 0 = 8 (outer vlan FV index).
 *                      Byte 1 = 0 (rsv)
 *                      Byte 2-3 = 0x0fff
 *              ii.     Byte 0 = 37 (inner vlan FV index).
 *                      Byte 1 = 0 (rsv)
 *                      Byte 2-3 = 0x0fff
 *
 * Step 2:
 * 2.   Create cloud filter using two L1 filter entries: stag and
 *              the new filter (outer vlan + inner vlan)
 *      a.      Valid_flags.replace_cloud = 1
 *      b.      Old_filter = 1 (instead of outer IP)
 *      c.      New_filter = 0x10
 *      d.      Buffer – 2 entries:
 *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
 *                      Byte 1-3 = 0 (rsv)
 *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
 *                      Byte 9-11 = 0 (rsv)
 */
static int
i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
{
        int ret = -ENOTSUP;
        struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
        struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);

        if (pf->support_multi_driver) {
                PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
                return ret;
        }

        /* Init */
        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

        /* create L1 filter */
        filter_replace.old_filter_type =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
        filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
        filter_replace.tr_bit = 0;

        /* Prepare the buffer, 2 entries */
        filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        /* Field Vector 12b mask */
        filter_replace_buf.data[2] = 0xff;
        filter_replace_buf.data[3] = 0x0f;
        filter_replace_buf.data[4] =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        /* Field Vector 12b mask */
        filter_replace_buf.data[6] = 0xff;
        filter_replace_buf.data[7] = 0x0f;
        ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                        &filter_replace_buf);
        if (ret != I40E_SUCCESS)
                return ret;
        PMD_DRV_LOG(DEBUG, "Global configuration modification: "
                    "cloud l1 type is changed from 0x%x to 0x%x",
                    filter_replace.old_filter_type,
                    filter_replace.new_filter_type);

        /* Apply the second L2 cloud filter */
        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

        /* create L2 filter, input for L2 filter will be L1 filter */
        filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
        filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
        filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;

        /* Prepare the buffer, 2 entries */
        filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                        &filter_replace_buf);
        if (!ret) {
                i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
                PMD_DRV_LOG(DEBUG, "Global configuration modification: "
                            "cloud filter type is changed from 0x%x to 0x%x",
                            filter_replace.old_filter_type,
                            filter_replace.new_filter_type);
        }
        return ret;
}
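
/*
 * Illustrative usage (an assumption, not verified against every version of
 * the i40e rte_flow parser): once the cloud filter has been replaced as
 * above, a QinQ rule can be installed through the flow API, e.g. from
 * testpmd:
 *
 *   flow create 0 ingress pattern eth / vlan tci is 0x10 /
 *        vlan tci is 0x20 / end actions queue index 2 / end
 *
 * The port id, TCI values and queue index are example values only.
 */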

int
i40e_config_rss_filter(struct i40e_pf *pf,
                struct i40e_rte_flow_rss_conf *conf, bool add)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t i, lut = 0;
        uint16_t j, num;
        struct rte_eth_rss_conf rss_conf = conf->rss_conf;
        struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;

        if (!add) {
                if (memcmp(conf, rss_info,
                        sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
                        i40e_pf_disable_rss(pf);
                        memset(rss_info, 0,
                                sizeof(struct i40e_rte_flow_rss_conf));
                        return 0;
                }
                return -EINVAL;
        }

        if (rss_info->num)
                return -EINVAL;

        /* If both VMDQ and RSS are enabled, not all PF queues are used for
         * RSS; calculate the number of queues actually configured for the
         * PF itself.
         */
        if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
                num = i40e_pf_calc_configured_queues_num(pf);
        else
                num = pf->dev_data->nb_rx_queues;

        num = RTE_MIN(num, conf->num);
        PMD_DRV_LOG(INFO, "RSS is configured over %u contiguous PF queues",
                        num);

        if (num == 0) {
                PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
                return -ENOTSUP;
        }

        /* Fill in redirection table */
        for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
                if (j == num)
                        j = 0;
                lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
                        hw->func_caps.rss_table_entry_width) - 1));
                if ((i & 3) == 3)
                        I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
        }

        if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
                i40e_pf_disable_rss(pf);
                return 0;
        }
        if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
                (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
                /* Random default keys */
                static uint32_t rss_key_default[] = {0x6b793944,
                        0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
                        0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
                        0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};

                rss_conf.rss_key = (uint8_t *)rss_key_default;
                rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
                                                        sizeof(uint32_t);
        }

        i40e_hw_rss_hash_set(pf, &rss_conf);

        rte_memcpy(rss_info,
                conf, sizeof(struct i40e_rte_flow_rss_conf));

        return 0;
}
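
/*
 * Worked example of the redirection table packing in
 * i40e_config_rss_filter() above: each I40E_PFQF_HLUT register holds four
 * LUT entries, one per byte, and the first entry of each group of four
 * lands in the most significant byte. With conf->queue = {0, 1, 2, 3} and
 * num = 4, the first register is written as
 *
 *   lut = (0 << 24) | (1 << 16) | (2 << 8) | 3 = 0x00010203
 *
 * and stored to I40E_PFQF_HLUT(0); the same pattern then repeats across
 * the whole rss_table_size.
 */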

RTE_INIT(i40e_init_log);
static void
i40e_init_log(void)
{
        i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
        if (i40e_logtype_init >= 0)
                rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
        i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
        if (i40e_logtype_driver >= 0)
                rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
}
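
/*
 * Illustrative sketch (not called by the driver): both logtypes registered
 * above default to NOTICE; an application can raise them at runtime as
 * below, or equivalently pass a matching --log-level option to the EAL.
 */
static __rte_unused void
i40e_example_enable_debug_logs(void)
{
        /* Matches pmd.net.i40e.init and pmd.net.i40e.driver */
        rte_log_set_level_regexp("pmd.net.i40e.*", RTE_LOG_DEBUG);
}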

RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
                              QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
                              ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
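
/*
 * Example devargs usage (values are illustrative): the parameters
 * registered above are supplied per device on the EAL command line, e.g.
 *
 *   testpmd -w 0000:02:00.0,queue-num-per-vf=4,support-multi-driver=1 -- -i
 */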