/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>
#include <rte_bitmap.h>
#include <rte_os_shim.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"
#include "i40e_hash.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"
#define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"
#define ETH_I40E_QUEUE_NUM_PER_VF_ARG   "queue-num-per-vf"
#define ETH_I40E_VF_MSG_CFG             "vf_msg_cfg"
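
/*
 * Illustrative devargs usage of the keys above (a sketch; the exact
 * values are application-specific and the PCI address is hypothetical):
 *
 *   dpdk-testpmd -a 0000:02:00.0,enable_floating_veb=1,floating_veb_list=1;3-4
 *   dpdk-testpmd -a 0000:02:00.0,queue-num-per-vf=4,support-multi-driver=1
 */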

#define I40E_CLEAR_PXE_WAIT_MS     200
#define I40E_VSI_TSR_QINQ_STRIP         0x4010
#define I40E_VSI_TSR(_i)        (0x00050800 + ((_i) * 4))

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
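
/*
 * Worked example: 0xF2000 bytes >> I40E_KILOSHIFT (10) = 0x3C8 = 968 KB,
 * so both watermarks default to the full Rx packet buffer size
 * (I40E_RXPBSIZE = 968 * 1024 bytes) expressed in kilobytes.
 */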

/* Receive Average Packet Size in bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL

/**
 * Below are values for writing un-exposed registers suggested
 * by silicon experts
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL
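
/*
 * Example (illustrative only): a register-level input set matching on the
 * IPv4 addresses plus L4 ports would be composed by OR-ing the values
 * above, e.g.
 *
 *	I40E_REG_INSET_L3_SRC_IP4 | I40E_REG_INSET_L3_DST_IP4 |
 *	I40E_REG_INSET_L4_SRC_PORT | I40E_REG_INSET_L4_DST_PORT
 *
 * The translation from RTE_ETH input-set flags to these register values
 * is done by helpers later in this file.
 */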

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0000FF00UL
#define I40E_INSET_IPV4_TTL_MASK        0x000000FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x0000FF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0000F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x0000FF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000000FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
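
/*
 * Sketch of how the Extended Tag offsets above can be used through the
 * PCI bus config-space API (illustrative only, not the driver's exact
 * routine):
 *
 *	uint32_t buf = 0;
 *	rte_pci_read_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CAP_REG);
 *	if (buf & PCI_DEV_CAP_EXT_TAG_MASK) {       // capability present
 *		rte_pci_read_config(pci_dev, &buf, sizeof(buf),
 *				    PCI_DEV_CTRL_REG);
 *		buf |= PCI_DEV_CTRL_EXT_TAG_MASK;   // enable Extended Tag
 *		rte_pci_write_config(pci_dev, &buf, sizeof(buf),
 *				     PCI_DEV_CTRL_REG);
 *	}
 */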

#define I40E_GLQF_PIT_IPV4_START        2
#define I40E_GLQF_PIT_IPV4_COUNT        2
#define I40E_GLQF_PIT_IPV6_START        4
#define I40E_GLQF_PIT_IPV6_COUNT        2

#define I40E_GLQF_PIT_SOURCE_OFF_GET(a) \
				(((a) & I40E_GLQF_PIT_SOURCE_OFF_MASK) >> \
				 I40E_GLQF_PIT_SOURCE_OFF_SHIFT)

#define I40E_GLQF_PIT_DEST_OFF_GET(a) \
				(((a) & I40E_GLQF_PIT_DEST_OFF_MASK) >> \
				 I40E_GLQF_PIT_DEST_OFF_SHIFT)

#define I40E_GLQF_PIT_FSIZE_GET(a)      (((a) & I40E_GLQF_PIT_FSIZE_MASK) >> \
					 I40E_GLQF_PIT_FSIZE_SHIFT)

#define I40E_GLQF_PIT_BUILD(off, mask)  (((off) << 16) | (mask))
#define I40E_FDIR_FIELD_OFFSET(a)       ((a) >> 1)
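
/*
 * Worked example: I40E_GLQF_PIT_BUILD(0x2, 0x1F) = (0x2 << 16) | 0x1F
 * = 0x0002001F, i.e. the offset occupies bits 31:16 and the mask bits
 * 15:0 of the built value. I40E_FDIR_FIELD_OFFSET() converts a byte
 * offset into a 16-bit word offset by dividing by two.
 */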

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static int i40e_dev_stop(struct rte_eth_dev *dev);
static int i40e_dev_close(struct rte_eth_dev *dev);
static int i40e_dev_reset(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
			      struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned limit);
static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
			       char *fw_version, size_t fw_size);
static int i40e_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id,
				int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type,
			      uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue,
				      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr,
			    uint32_t index,
			    uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
				uint32_t hireg,
				uint32_t loreg,
				bool offset_loaded,
				uint64_t *offset,
				uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static void i40e_dev_alarm_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
			      uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			      uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			       uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
				       struct i40e_vsi *vsi);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
				 const struct rte_flow_ops **ops);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
				 struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
						     uint16_t seid,
						     uint16_t rule_type,
						     uint16_t *entries,
						     uint16_t count,
						     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
				struct rte_eth_mirror_conf *mirror_conf,
				uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp,
					   uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
				   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
				    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					  uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
			 struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *eeprom);

static int i40e_get_module_info(struct rte_eth_dev *dev,
				struct rte_eth_dev_module_info *modinfo);
static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
				  struct rte_dev_eeprom_info *info);

static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
				     struct rte_ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
	const struct rte_eth_ethertype_filter *input,
	struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
	struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
				struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

static const char *const valid_keys[] = {
	ETH_I40E_FLOATING_VEB_ARG,
	ETH_I40E_FLOATING_VEB_LIST_ARG,
	ETH_I40E_SUPPORT_MULTI_DRIVER,
	ETH_I40E_QUEUE_NUM_PER_VF_ARG,
	ETH_I40E_VF_MSG_CFG,
	NULL};

static const struct rte_pci_id pci_id_i40e_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.dev_reset                    = i40e_dev_reset,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.xstats_get                   = i40e_dev_xstats_get,
	.xstats_get_names             = i40e_dev_xstats_get_names,
	.stats_reset                  = i40e_dev_stats_reset,
	.xstats_reset                 = i40e_dev_stats_reset,
	.fw_version_get               = i40e_fw_version_get,
	.dev_infos_get                = i40e_dev_info_get,
	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_get                = i40e_flow_ctrl_get,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
	.flow_ops_get                 = i40e_dev_flow_ops_get,
	.rxq_info_get                 = i40e_rxq_info_get,
	.txq_info_get                 = i40e_txq_info_get,
	.rx_burst_mode_get            = i40e_rx_burst_mode_get,
	.tx_burst_mode_get            = i40e_tx_burst_mode_get,
	.mirror_rule_set              = i40e_mirror_rule_set,
	.mirror_rule_reset            = i40e_mirror_rule_reset,
	.timesync_enable              = i40e_timesync_enable,
	.timesync_disable             = i40e_timesync_disable,
	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
	.get_dcb_info                 = i40e_dev_get_dcb_info,
	.timesync_adjust_time         = i40e_timesync_adjust_time,
	.timesync_read_time           = i40e_timesync_read_time,
	.timesync_write_time          = i40e_timesync_write_time,
	.get_reg                      = i40e_get_regs,
	.get_eeprom_length            = i40e_get_eeprom_length,
	.get_eeprom                   = i40e_get_eeprom,
	.get_module_info              = i40e_get_module_info,
	.get_module_eeprom            = i40e_get_module_eeprom,
	.mac_addr_set                 = i40e_set_default_mac_addr,
	.mtu_set                      = i40e_dev_mtu_set,
	.tm_ops_get                   = i40e_tm_ops_get,
	.tx_done_cleanup              = i40e_tx_done_cleanup,
	.get_monitor_addr             = i40e_get_monitor_addr,
};

/* store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
		sizeof(rte_i40e_stats_strings[0]))
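
/*
 * The name/offset tables in this file are consumed with a generic
 * pattern roughly like the following (a sketch, not the exact driver
 * code):
 *
 *	uint64_t val = *(uint64_t *)((char *)&eth_stats +
 *				     rte_i40e_stats_strings[i].offset);
 */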

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
		mac_remote_faults)},
	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
		rx_length_errors)},
	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
		mac_short_packet_dropped)},
	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_big)},
	{"rx_flow_director_atr_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
	{"rx_flow_director_sb_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		tx_lpi_status)},
	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		rx_lpi_status)},
	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		tx_lpi_count)},
	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
		sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_rx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
		sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_tx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_tx)},
	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
		sizeof(rte_i40e_txq_prio_strings[0]))

static int
eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	int i, retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
				&eth_da);
		if (retval)
			return retval;
	}

	if (eth_da.nb_representor_ports > 0 &&
	    eth_da.type != RTE_ETH_REPRESENTOR_VF) {
		PMD_DRV_LOG(ERR, "unsupported representor type: %s",
			    pci_dev->device.devargs->args);
		return -ENOTSUP;
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
		sizeof(struct i40e_adapter),
		eth_dev_pci_specific_init, pci_dev,
		eth_i40e_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	/* probe VF representor ports */
	struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
		pci_dev->device.name);

	if (pf_ethdev == NULL)
		return -ENODEV;

	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		struct i40e_vf_representor representor = {
			.vf_id = eth_da.representor_ports[i],
			.switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
				pf_ethdev->data->dev_private)->switch_domain_id,
			.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
				pf_ethdev->data->dev_private)
		};

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			pci_dev->device.name, eth_da.representor_ports[i]);

		retval = rte_eth_dev_create(&pci_dev->device, name,
			sizeof(struct i40e_vf_representor), NULL, NULL,
			i40e_vf_representor_init, &representor);

		if (retval)
			PMD_DRV_LOG(ERR, "failed to create i40e VF representor %s.",
				    name);
	}

	return 0;
}
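
/*
 * VF representor ports are requested through the standard "representor"
 * devarg, e.g. (illustrative, hypothetical PCI address):
 *   -a 0000:02:00.0,representor=[0-3]
 * which creates one representor ethdev per listed VF on top of the PF port.
 */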

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return 0;

	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
		return rte_eth_dev_pci_generic_remove(pci_dev,
					i40e_vf_representor_uninit);
	else
		return rte_eth_dev_pci_generic_remove(pci_dev,
						eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
	.id_table = pci_id_i40e_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_i40e_pci_probe,
	.remove = eth_i40e_pci_remove,
};

static inline void
i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
			 uint32_t reg_val)
{
	uint32_t ori_reg_val;
	struct rte_eth_dev_data *dev_data =
		((struct i40e_adapter *)hw->back)->pf.dev_data;
	struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];

	ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
	i40e_write_rx_ctl(hw, reg_addr, reg_val);
	if (ori_reg_val != reg_val)
		PMD_DRV_LOG(WARNING,
			    "i40e device %s changed global register [0x%08x]."
			    " original: 0x%08x, new: 0x%08x",
			    dev->device->name, reg_addr, ori_reg_val, reg_val);
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
	 * Initialize registers for parsing the packet type of QinQ.
	 * This should be removed once a proper configuration API is
	 * added, to avoid configuration conflicts between ports of
	 * the same device.
	 */
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}

static inline void i40e_config_automask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	/* INTENA flag is not auto-cleared for interrupt */
	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;

	/* If multi-driver support is enabled, the PF will use INT0. */
	if (!pf->support_multi_driver)
		val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

static inline void i40e_clear_automask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val &= ~(I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
		 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK);

	if (!pf->support_multi_driver)
		val &= ~I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
	int ret;

	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
				I40E_FLOW_CONTROL_ETHERTYPE, flags,
				pf->main_vsi_seid, 0,
				TRUE, NULL, NULL);
	if (ret)
		PMD_INIT_LOG(ERR,
			"Failed to add filter to drop flow control frames from VSIs.");
}

static int
floating_veb_list_handler(__rte_unused const char *key,
			  const char *floating_veb_value,
			  void *opaque)
{
	int idx = 0;
	unsigned int count = 0;
	char *end = NULL;
	int min, max;
	bool *vf_floating_veb = opaque;

	while (isblank(*floating_veb_value))
		floating_veb_value++;

	/* Reset floating VEB configuration for VFs */
	for (idx = 0; idx < I40E_MAX_VF; idx++)
		vf_floating_veb[idx] = false;

	min = I40E_MAX_VF;
	do {
		while (isblank(*floating_veb_value))
			floating_veb_value++;
		if (*floating_veb_value == '\0')
			return -1;
		errno = 0;
		idx = strtoul(floating_veb_value, &end, 10);
		if (errno || end == NULL)
			return -1;
		if (idx < 0)
			return -1;
		while (isblank(*end))
			end++;
		if (*end == '-') {
			min = idx;
		} else if ((*end == ';') || (*end == '\0')) {
			max = idx;
			if (min == I40E_MAX_VF)
				min = idx;
			if (max >= I40E_MAX_VF)
				max = I40E_MAX_VF - 1;
			for (idx = min; idx <= max; idx++) {
				vf_floating_veb[idx] = true;
				count++;
			}
			min = I40E_MAX_VF;
		} else {
			return -1;
		}
		floating_veb_value = end + 1;
	} while (*end != '\0');

	if (count == 0)
		return -1;

	return 0;
}
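
/*
 * Example: a devarg of floating_veb_list=1;3-4 makes the handler above
 * set vf_floating_veb[1], [3] and [4] to true; entries may be single VF
 * indexes or min-max ranges, separated by ';'.
 */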

static void
config_vf_floating_veb(struct rte_devargs *devargs,
		       uint16_t floating_veb,
		       bool *vf_floating_veb)
{
	struct rte_kvargs *kvlist;
	int i;
	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

	if (!floating_veb)
		return;
	/* All the VFs attach to the floating VEB by default
	 * when the floating VEB is enabled.
	 */
	for (i = 0; i < I40E_MAX_VF; i++)
		vf_floating_veb[i] = true;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
	if (kvlist == NULL)
		return;

	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
		rte_kvargs_free(kvlist);
		return;
	}
	/* When the floating_veb_list parameter exists, all the VFs
	 * first attach to the legacy VEB, then the VFs named in the
	 * floating_veb_list are moved to the floating VEB.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_list,
			       floating_veb_list_handler,
			       vf_floating_veb) < 0) {
		rte_kvargs_free(kvlist);
		return;
	}
	rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
			    const char *value,
			    __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Floating VEB is enabled when the key-value pair
	 * enable_floating_veb=1 is present.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_key,
			       i40e_check_floating_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
		pf->floating_veb =
			is_floating_veb_supported(pci_dev->device.devargs);
		config_vf_floating_veb(pci_dev->device.devargs,
				       pf->floating_veb,
				       pf->floating_veb_list);
	} else {
		pf->floating_veb = false;
	}
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	char ethertype_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters ethertype_hash_params = {
		.name = ethertype_hash_name,
		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
		.key_len = sizeof(struct i40e_ethertype_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize ethertype filter rule list and hash */
	TAILQ_INIT(&ethertype_rule->ethertype_list);
	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
		 "ethertype_%s", dev->device->name);
	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
	if (!ethertype_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
		return -EINVAL;
	}
	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
				       sizeof(struct i40e_ethertype_filter *) *
				       I40E_MAX_ETHERTYPE_FILTER_NUM,
				       0);
	if (!ethertype_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for ethertype hash map!");
		ret = -ENOMEM;
		goto err_ethertype_hash_map_alloc;
	}

	return 0;

err_ethertype_hash_map_alloc:
	rte_hash_free(ethertype_rule->hash_table);

	return ret;
}

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	char tunnel_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters tunnel_hash_params = {
		.name = tunnel_hash_name,
		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
		.key_len = sizeof(struct i40e_tunnel_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize tunnel filter rule list and hash */
	TAILQ_INIT(&tunnel_rule->tunnel_list);
	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
		 "tunnel_%s", dev->device->name);
	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
	if (!tunnel_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
		return -EINVAL;
	}
	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
				    sizeof(struct i40e_tunnel_filter *) *
				    I40E_MAX_TUNNEL_FILTER_NUM,
				    0);
	if (!tunnel_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for tunnel hash map!");
		ret = -ENOMEM;
		goto err_tunnel_hash_map_alloc;
	}

	return 0;

err_tunnel_hash_map_alloc:
	rte_hash_free(tunnel_rule->hash_table);

	return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
	uint32_t best = hw->func_caps.fd_filters_best_effort;
	enum i40e_filter_pctype pctype;
	struct rte_bitmap *bmp = NULL;
	uint32_t bmp_size;
	void *mem = NULL;
	uint32_t i = 0;
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = I40E_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct i40e_fdir_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize flow director filter rule list and hash */
	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}

	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
					  sizeof(struct i40e_fdir_filter *) *
					  I40E_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}

	fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
			sizeof(struct i40e_fdir_filter) *
			I40E_MAX_FDIR_FILTER_NUM,
			0);

	if (!fdir_info->fdir_filter_array) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir filter array!");
		ret = -ENOMEM;
		goto err_fdir_filter_array_alloc;
	}

	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
		pf->fdir.flow_count[pctype] = 0;

	fdir_info->fdir_space_size = alloc + best;
	fdir_info->fdir_actual_cnt = 0;
	fdir_info->fdir_guarantee_total_space = alloc;
	fdir_info->fdir_guarantee_free_space =
		fdir_info->fdir_guarantee_total_space;

	PMD_DRV_LOG(INFO, "FDIR guaranteed space: %u, best effort space: %u.", alloc, best);

	fdir_info->fdir_flow_pool.pool =
			rte_zmalloc("i40e_fdir_entry",
				sizeof(struct i40e_fdir_entry) *
				fdir_info->fdir_space_size,
				0);

	if (!fdir_info->fdir_flow_pool.pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for bitmap flow!");
		ret = -ENOMEM;
		goto err_fdir_bitmap_flow_alloc;
	}

	for (i = 0; i < fdir_info->fdir_space_size; i++)
		fdir_info->fdir_flow_pool.pool[i].idx = i;

	bmp_size =
		rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
	mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
	if (mem == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir bitmap!");
		ret = -ENOMEM;
		goto err_fdir_mem_alloc;
	}
	bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
	if (bmp == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to initialize fdir bitmap!");
		ret = -ENOMEM;
		goto err_fdir_bmp_alloc;
	}
	for (i = 0; i < fdir_info->fdir_space_size; i++)
		rte_bitmap_set(bmp, i);

	fdir_info->fdir_flow_pool.bitmap = bmp;

	return 0;

err_fdir_bmp_alloc:
	rte_free(mem);
err_fdir_mem_alloc:
	rte_free(fdir_info->fdir_flow_pool.pool);
err_fdir_bitmap_flow_alloc:
	rte_free(fdir_info->fdir_filter_array);
err_fdir_filter_array_alloc:
	rte_free(fdir_info->hash_map);
err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}
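
/*
 * Sketch of how a free flow entry can be taken from the bitmap-backed
 * pool initialized above (illustrative; the driver has its own
 * alloc/free helpers):
 *
 *	uint32_t pos = 0;
 *	uint64_t slab = 0;
 *	if (rte_bitmap_scan(fdir_info->fdir_flow_pool.bitmap, &pos, &slab)) {
 *		pos += __builtin_ctzll(slab);	// first set (free) index
 *		rte_bitmap_clear(fdir_info->fdir_flow_pool.bitmap, pos);
 *		entry = &fdir_info->fdir_flow_pool.pool[pos];
 *	}
 */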
1204
1205 static void
1206 i40e_init_customized_info(struct i40e_pf *pf)
1207 {
1208         int i;
1209
1210         /* Initialize customized pctype */
1211         for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
1212                 pf->customized_pctype[i].index = i;
1213                 pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
1214                 pf->customized_pctype[i].valid = false;
1215         }
1216
1217         pf->gtp_support = false;
1218         pf->esp_support = false;
1219 }
1220
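/*
 * Configure the flow director filter invalidation priority. When this
 * driver owns the device exclusively, force "guaranteed first" in
 * GLQF_CTL; in multi-driver mode, only read back the current setting.
 */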
1221 static void
1222 i40e_init_filter_invalidation(struct i40e_pf *pf)
1223 {
1224         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1225         struct i40e_fdir_info *fdir_info = &pf->fdir;
1226         uint32_t glqf_ctl_reg = 0;
1227
1228         glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
1229         if (!pf->support_multi_driver) {
1230                 fdir_info->fdir_invalprio = 1;
1231                 glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
1232                 PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
1233                 i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
1234         } else {
1235                 if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
1236                         fdir_info->fdir_invalprio = 1;
1237                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
1238                 } else {
1239                         fdir_info->fdir_invalprio = 0;
1240                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
1241                 }
1242         }
1243 }
1244
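/* Clear all queue region (HREGION) registers and the software copy. */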
1245 void
1246 i40e_init_queue_region_conf(struct rte_eth_dev *dev)
1247 {
1248         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1249         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1250         struct i40e_queue_regions *info = &pf->queue_region;
1251         uint16_t i;
1252
1253         for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
1254                 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
1255
1256         memset(info, 0, sizeof(struct i40e_queue_regions));
1257 }
1258
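/*
 * rte_kvargs handler for the "support-multi-driver" devarg; only the
 * values 0 and 1 are accepted.
 */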
1259 static int
1260 i40e_parse_multi_drv_handler(__rte_unused const char *key,
1261                                const char *value,
1262                                void *opaque)
1263 {
1264         struct i40e_pf *pf;
1265         unsigned long support_multi_driver;
1266         char *end;
1267
1268         pf = (struct i40e_pf *)opaque;
1269
1270         errno = 0;
1271         support_multi_driver = strtoul(value, &end, 10);
1272         if (errno != 0 || end == value || *end != 0) {
1273                 PMD_DRV_LOG(WARNING, "Wrong global configuration");
1274                 return -(EINVAL);
1275         }
1276
1277         if (support_multi_driver == 1 || support_multi_driver == 0)
1278                 pf->support_multi_driver = (bool)support_multi_driver;
1279         else
1280                 PMD_DRV_LOG(WARNING,
1281                             "%s must be 1 or 0; enabling global configuration by default.",
1282                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1283         return 0;
1284 }
1285
1286 static int
1287 i40e_support_multi_driver(struct rte_eth_dev *dev)
1288 {
1289         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1290         struct rte_kvargs *kvlist;
1291         int kvargs_count;
1292
1293         /* Enable global configuration by default */
1294         pf->support_multi_driver = false;
1295
1296         if (!dev->device->devargs)
1297                 return 0;
1298
1299         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1300         if (!kvlist)
1301                 return -EINVAL;
1302
1303         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1304         if (!kvargs_count) {
1305                 rte_kvargs_free(kvlist);
1306                 return 0;
1307         }
1308
1309         if (kvargs_count > 1)
1310                 PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
1311                             "the first invalid or last valid one is used!",
1312                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1313
1314         if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1315                                i40e_parse_multi_drv_handler, pf) < 0) {
1316                 rte_kvargs_free(kvlist);
1317                 return -EINVAL;
1318         }
1319
1320         rte_kvargs_free(kvlist);
1321         return 0;
1322 }
1323
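/*
 * Debug-write a global register through the admin queue, warning when
 * the new value differs from the current one, since global registers
 * are shared by all drivers bound to the device.
 */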
1324 static int
1325 i40e_aq_debug_write_global_register(struct i40e_hw *hw,
1326                                     uint32_t reg_addr, uint64_t reg_val,
1327                                     struct i40e_asq_cmd_details *cmd_details)
1328 {
1329         uint64_t ori_reg_val;
1330         struct rte_eth_dev_data *dev_data =
1331                 ((struct i40e_adapter *)hw->back)->pf.dev_data;
1332         struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
1333         int ret;
1334
1335         ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
1336         if (ret != I40E_SUCCESS) {
1337                 PMD_DRV_LOG(ERR,
1338                             "Failed to debug read from 0x%08x",
1339                             reg_addr);
1340                 return -EIO;
1341         }
1342
1343         if (ori_reg_val != reg_val)
1344                 PMD_DRV_LOG(WARNING,
1345                             "i40e device %s changed global register [0x%08x]."
1346                             " original: 0x%"PRIx64", after: 0x%"PRIx64,
1347                             dev->device->name, reg_addr, ori_reg_val, reg_val);
1348
1349         return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
1350 }
1351
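/*
 * rte_kvargs handler for the "vf_msg_cfg" devarg, which is formatted
 * as "max_msg@period:ignore_second" (see the example in the error log).
 */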
1352 static int
1353 read_vf_msg_config(__rte_unused const char *key,
1354                                const char *value,
1355                                void *opaque)
1356 {
1357         struct i40e_vf_msg_cfg *cfg = opaque;
1358
1359         if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
1360                         &cfg->ignore_second) != 3) {
1361                 memset(cfg, 0, sizeof(*cfg));
1362                 PMD_DRV_LOG(ERR, "format error! example: "
1363                                 "%s=60@120:180", ETH_I40E_VF_MSG_CFG);
1364                 return -EINVAL;
1365         }
1366
1367         /*
1368          * If the message validation function is enabled, both 'period'
1369          * and 'ignore_second' must be greater than 0.
1370          */
1371         if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
1372                 memset(cfg, 0, sizeof(*cfg));
1373                 PMD_DRV_LOG(ERR, "%s error! the second and third"
1374                                 " numbers must be greater than 0!",
1375                                 ETH_I40E_VF_MSG_CFG);
1376                 return -EINVAL;
1377         }
1378
1379         return 0;
1380 }
1381
1382 static int
1383 i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
1384                 struct i40e_vf_msg_cfg *msg_cfg)
1385 {
1386         struct rte_kvargs *kvlist;
1387         int kvargs_count;
1388         int ret = 0;
1389
1390         memset(msg_cfg, 0, sizeof(*msg_cfg));
1391
1392         if (!dev->device->devargs)
1393                 return ret;
1394
1395         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1396         if (!kvlist)
1397                 return -EINVAL;
1398
1399         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
1400         if (!kvargs_count)
1401                 goto free_end;
1402
1403         if (kvargs_count > 1) {
1404                 PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
1405                                 ETH_I40E_VF_MSG_CFG);
1406                 ret = -EINVAL;
1407                 goto free_end;
1408         }
1409
1410         if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
1411                         read_vf_msg_config, msg_cfg) < 0)
1412                 ret = -EINVAL;
1413
1414 free_end:
1415         rte_kvargs_free(kvlist);
1416         return ret;
1417 }
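/*
 * Example of passing the devargs parsed above on the EAL command line
 * (the PCI address is a placeholder):
 *   -a 0000:02:00.0,support-multi-driver=1,vf_msg_cfg=60@120:180
 */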
1418
1419 #define I40E_ALARM_INTERVAL 50000 /* us */
1420
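/*
 * Main PF initialization: set up the ethdev ops and burst functions,
 * reset the PF, bring up the admin queue and LAN HMC, create the main
 * VSI, register the interrupt handler, and initialize the filter lists
 * and related software state.
 */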
1421 static int
1422 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1423 {
1424         struct rte_pci_device *pci_dev;
1425         struct rte_intr_handle *intr_handle;
1426         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1427         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1428         struct i40e_vsi *vsi;
1429         int ret;
1430         uint32_t len, val;
1431         uint8_t aq_fail = 0;
1432
1433         PMD_INIT_FUNC_TRACE();
1434
1435         dev->dev_ops = &i40e_eth_dev_ops;
1436         dev->rx_queue_count = i40e_dev_rx_queue_count;
1437         dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
1438         dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
1439         dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
1440         dev->rx_pkt_burst = i40e_recv_pkts;
1441         dev->tx_pkt_burst = i40e_xmit_pkts;
1442         dev->tx_pkt_prepare = i40e_prep_pkts;
1443
1444         /* For secondary processes, we don't initialize any further, as the
1445          * primary has already done this work. Only check whether a different
1446          * Rx/Tx function is needed. */
1447         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1448                 i40e_set_rx_function(dev);
1449                 i40e_set_tx_function(dev);
1450                 return 0;
1451         }
1452         i40e_set_default_ptype_table(dev);
1453         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1454         intr_handle = &pci_dev->intr_handle;
1455
1456         rte_eth_copy_pci_info(dev, pci_dev);
1457         dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1458
1459         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1460         pf->dev_data = dev->data;
1461
1462         hw->back = I40E_PF_TO_ADAPTER(pf);
1463         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1464         if (!hw->hw_addr) {
1465                 PMD_INIT_LOG(ERR,
1466                         "Hardware is not available, as address is NULL");
1467                 return -ENODEV;
1468         }
1469
1470         hw->vendor_id = pci_dev->id.vendor_id;
1471         hw->device_id = pci_dev->id.device_id;
1472         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1473         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1474         hw->bus.device = pci_dev->addr.devid;
1475         hw->bus.func = pci_dev->addr.function;
1476         hw->adapter_stopped = 0;
1477         hw->adapter_closed = 0;
1478
1479         /* Init switch device pointer */
1480         hw->switch_dev = NULL;
1481
1482         /*
1483          * Switch Tag value should not be identical to either the First Tag
1484          * or Second Tag values. So set something other than common Ethertype
1485          * for internal switching.
1486          */
1487         hw->switch_tag = 0xffff;
1488
1489         val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1490         if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1491                 PMD_INIT_LOG(ERR,
1492                         "Firmware recovery mode detected. Limiting functionality. "
1493                         "Refer to the Intel(R) Ethernet Adapters and Devices "
1494                         "User Guide for details on firmware recovery mode.");
1495                 return -EIO;
1496         }
1497
1498         i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
1499         /* Check whether multi-driver support is needed */
1500         i40e_support_multi_driver(dev);
1501
1502         /* Make sure all is clean before doing PF reset */
1503         i40e_clear_hw(hw);
1504
1505         /* Reset here to make sure all is clean for each PF */
1506         ret = i40e_pf_reset(hw);
1507         if (ret) {
1508                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1509                 return ret;
1510         }
1511
1512         /* Initialize the shared code (base driver) */
1513         ret = i40e_init_shared_code(hw);
1514         if (ret) {
1515                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1516                 return ret;
1517         }
1518
1519         /* Initialize the parameters for adminq */
1520         i40e_init_adminq_parameter(hw);
1521         ret = i40e_init_adminq(hw);
1522         if (ret != I40E_SUCCESS) {
1523                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1524                 return -EIO;
1525         }
1526         /* Firmware of SFP X722 does not support 802.1ad frames */
1527         if (hw->device_id == I40E_DEV_ID_SFP_X722 ||
1528                 hw->device_id == I40E_DEV_ID_SFP_I_X722)
1529                 hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
1530
1531         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1532                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1533                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1534                      ((hw->nvm.version >> 12) & 0xf),
1535                      ((hw->nvm.version >> 4) & 0xff),
1536                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1537
1538         /* Initialize the hardware */
1539         i40e_hw_init(dev);
1540
1541         i40e_config_automask(pf);
1542
1543         i40e_set_default_pctype_table(dev);
1544
1545         /*
1546          * To work around an NVM issue, initialize the registers for the
1547          * QinQ packet type in software.
1548          * This should be removed once the issue is fixed in the NVM.
1549          */
1550         if (!pf->support_multi_driver)
1551                 i40e_GLQF_reg_init(hw);
1552
1553         /* Initialize the input set for filters (hash and fd) to default value */
1554         i40e_filter_input_set_init(pf);
1555
1556         /* initialise the L3_MAP register */
1557         if (!pf->support_multi_driver) {
1558                 ret = i40e_aq_debug_write_global_register(hw,
1559                                                    I40E_GLQF_L3_MAP(40),
1560                                                    0x00000028,  NULL);
1561                 if (ret)
1562                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1563                                      ret);
1564                 PMD_INIT_LOG(DEBUG,
1565                              "Global register 0x%08x is changed with 0x28",
1566                              I40E_GLQF_L3_MAP(40));
1567         }
1568
1569         /* Need the special FW version to support floating VEB */
1570         config_floating_veb(dev);
1571         /* Clear PXE mode */
1572         i40e_clear_pxe_mode(hw);
1573         i40e_dev_sync_phy_type(hw);
1574
1575         /*
1576          * On X710, performance numbers on recent firmware versions fall far
1577          * below expectations, and the fix for this issue may not be
1578          * integrated in the next firmware release, so a workaround in the
1579          * software driver is needed. It modifies the initial values of 3
1580          * internal-only registers. Note that the workaround can be removed
1581          * once this is fixed in firmware in the future.
1582          */
1583         i40e_configure_registers(hw);
1584
1585         /* Get hw capabilities */
1586         ret = i40e_get_cap(hw);
1587         if (ret != I40E_SUCCESS) {
1588                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1589                 goto err_get_capabilities;
1590         }
1591
1592         /* Initialize parameters for PF */
1593         ret = i40e_pf_parameter_init(dev);
1594         if (ret != 0) {
1595                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1596                 goto err_parameter_init;
1597         }
1598
1599         /* Initialize the queue management */
1600         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1601         if (ret < 0) {
1602                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1603                 goto err_qp_pool_init;
1604         }
1605         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1606                                 hw->func_caps.num_msix_vectors - 1);
1607         if (ret < 0) {
1608                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1609                 goto err_msix_pool_init;
1610         }
1611
1612         /* Initialize lan hmc */
1613         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1614                                 hw->func_caps.num_rx_qp, 0, 0);
1615         if (ret != I40E_SUCCESS) {
1616                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1617                 goto err_init_lan_hmc;
1618         }
1619
1620         /* Configure lan hmc */
1621         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1622         if (ret != I40E_SUCCESS) {
1623                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1624                 goto err_configure_lan_hmc;
1625         }
1626
1627         /* Get and check the mac address */
1628         i40e_get_mac_addr(hw, hw->mac.addr);
1629         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1630                 PMD_INIT_LOG(ERR, "mac address is not valid");
1631                 ret = -EIO;
1632                 goto err_get_mac_addr;
1633         }
1634         /* Copy the permanent MAC address */
1635         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1636                         (struct rte_ether_addr *)hw->mac.perm_addr);
1637
1638         /* Disable flow control */
1639         hw->fc.requested_mode = I40E_FC_NONE;
1640         i40e_set_fc(hw, &aq_fail, TRUE);
1641
1642         /* Set the global registers with default ether type value */
1643         if (!pf->support_multi_driver) {
1644                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1645                                          RTE_ETHER_TYPE_VLAN);
1646                 if (ret != I40E_SUCCESS) {
1647                         PMD_INIT_LOG(ERR,
1648                                      "Failed to set the default outer "
1649                                      "VLAN ether type");
1650                         goto err_setup_pf_switch;
1651                 }
1652         }
1653
1654         /* PF setup, which includes VSI setup */
1655         ret = i40e_pf_setup(pf);
1656         if (ret) {
1657                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1658                 goto err_setup_pf_switch;
1659         }
1660
1661         vsi = pf->main_vsi;
1662
1663         /* Disable double vlan by default */
1664         i40e_vsi_config_double_vlan(vsi, FALSE);
1665
1666         /* Disable S-TAG identification when floating_veb is disabled */
1667         if (!pf->floating_veb) {
1668                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1669                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1670                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1671                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1672                 }
1673         }
1674
1675         if (!vsi->max_macaddrs)
1676                 len = RTE_ETHER_ADDR_LEN;
1677         else
1678                 len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1679
1680         /* Should be after VSI initialized */
1681         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1682         if (!dev->data->mac_addrs) {
1683                 PMD_INIT_LOG(ERR,
1684                         "Failed to allocate memory for storing MAC address");
1685                 goto err_mac_alloc;
1686         }
1687         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1688                                         &dev->data->mac_addrs[0]);
1689
1690         /* Init dcb to sw mode by default */
1691         ret = i40e_dcb_init_configure(dev, TRUE);
1692         if (ret != I40E_SUCCESS) {
1693                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1694                 pf->flags &= ~I40E_FLAG_DCB;
1695         }
1696         /* Update HW struct after DCB configuration */
1697         i40e_get_cap(hw);
1698
1699         /* initialize pf host driver to setup SRIOV resource if applicable */
1700         i40e_pf_host_init(dev);
1701
1702         /* register callback func to eal lib */
1703         rte_intr_callback_register(intr_handle,
1704                                    i40e_dev_interrupt_handler, dev);
1705
1706         /* configure and enable device interrupt */
1707         i40e_pf_config_irq0(hw, TRUE);
1708         i40e_pf_enable_irq0(hw);
1709
1710         /* enable uio intr after callback register */
1711         rte_intr_enable(intr_handle);
1712
1713         /* By default disable flexible payload in global configuration */
1714         if (!pf->support_multi_driver)
1715                 i40e_flex_payload_reg_set_default(hw);
1716
1717         /*
1718          * Add an ethertype filter to drop all flow control frames transmitted
1719          * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1720          * frames to wire.
1721          */
1722         i40e_add_tx_flow_control_drop_filter(pf);
1723
1724         /* Set the max frame size to 0x2600 by default,
1725          * in case other drivers changed the default value.
1726          */
1727         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
1728
1729         /* initialize mirror rule list */
1730         TAILQ_INIT(&pf->mirror_list);
1731
1732         /* initialize RSS rule list */
1733         TAILQ_INIT(&pf->rss_config_list);
1734
1735         /* initialize Traffic Manager configuration */
1736         i40e_tm_conf_init(dev);
1737
1738         /* Initialize customized information */
1739         i40e_init_customized_info(pf);
1740
1741         /* Initialize the filter invalidation configuration */
1742         i40e_init_filter_invalidation(pf);
1743
1744         ret = i40e_init_ethtype_filter_list(dev);
1745         if (ret < 0)
1746                 goto err_init_ethtype_filter_list;
1747         ret = i40e_init_tunnel_filter_list(dev);
1748         if (ret < 0)
1749                 goto err_init_tunnel_filter_list;
1750         ret = i40e_init_fdir_filter_list(dev);
1751         if (ret < 0)
1752                 goto err_init_fdir_filter_list;
1753
1754         /* initialize queue region configuration */
1755         i40e_init_queue_region_conf(dev);
1756
1757         /* reset all stats of the device, including pf and main vsi */
1758         i40e_dev_stats_reset(dev);
1759
1760         return 0;
1761
1762 err_init_fdir_filter_list:
1763         rte_hash_free(pf->tunnel.hash_table);
1764         rte_free(pf->tunnel.hash_map);
1765 err_init_tunnel_filter_list:
1766         rte_hash_free(pf->ethertype.hash_table);
1767         rte_free(pf->ethertype.hash_map);
1768 err_init_ethtype_filter_list:
1769         rte_intr_callback_unregister(intr_handle,
1770                 i40e_dev_interrupt_handler, dev);
1771         rte_free(dev->data->mac_addrs);
1772         dev->data->mac_addrs = NULL;
1773 err_mac_alloc:
1774         i40e_vsi_release(pf->main_vsi);
1775 err_setup_pf_switch:
1776 err_get_mac_addr:
1777 err_configure_lan_hmc:
1778         (void)i40e_shutdown_lan_hmc(hw);
1779 err_init_lan_hmc:
1780         i40e_res_pool_destroy(&pf->msix_pool);
1781 err_msix_pool_init:
1782         i40e_res_pool_destroy(&pf->qp_pool);
1783 err_qp_pool_init:
1784 err_parameter_init:
1785 err_get_capabilities:
1786         (void)i40e_shutdown_adminq(hw);
1787
1788         return ret;
1789 }
1790
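/*
 * The helpers below release the software state (hash tables, hash maps
 * and rule lists) of the ethertype, tunnel and flow director filters.
 */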
1791 static void
1792 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1793 {
1794         struct i40e_ethertype_filter *p_ethertype;
1795         struct i40e_ethertype_rule *ethertype_rule;
1796
1797         ethertype_rule = &pf->ethertype;
1798         /* Remove all ethertype filter rules and hash */
1799         if (ethertype_rule->hash_map)
1800                 rte_free(ethertype_rule->hash_map);
1801         if (ethertype_rule->hash_table)
1802                 rte_hash_free(ethertype_rule->hash_table);
1803
1804         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1805                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1806                              p_ethertype, rules);
1807                 rte_free(p_ethertype);
1808         }
1809 }
1810
1811 static void
1812 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1813 {
1814         struct i40e_tunnel_filter *p_tunnel;
1815         struct i40e_tunnel_rule *tunnel_rule;
1816
1817         tunnel_rule = &pf->tunnel;
1818         /* Remove all tunnel filter rules and hash */
1819         if (tunnel_rule->hash_map)
1820                 rte_free(tunnel_rule->hash_map);
1821         if (tunnel_rule->hash_table)
1822                 rte_hash_free(tunnel_rule->hash_table);
1823
1824         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1825                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1826                 rte_free(p_tunnel);
1827         }
1828 }
1829
1830 static void
1831 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1832 {
1833         struct i40e_fdir_filter *p_fdir;
1834         struct i40e_fdir_info *fdir_info;
1835
1836         fdir_info = &pf->fdir;
1837
1838         /* Remove all flow director rules */
1839         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
1840                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1841 }
1842
1843 static void
1844 i40e_fdir_memory_cleanup(struct i40e_pf *pf)
1845 {
1846         struct i40e_fdir_info *fdir_info;
1847
1848         fdir_info = &pf->fdir;
1849
1850         /* flow director memory cleanup */
1851         if (fdir_info->hash_map)
1852                 rte_free(fdir_info->hash_map);
1853         if (fdir_info->hash_table)
1854                 rte_hash_free(fdir_info->hash_table);
1855         if (fdir_info->fdir_flow_pool.bitmap)
1856                 rte_free(fdir_info->fdir_flow_pool.bitmap);
1857         if (fdir_info->fdir_flow_pool.pool)
1858                 rte_free(fdir_info->fdir_flow_pool.pool);
1859         if (fdir_info->fdir_filter_array)
1860                 rte_free(fdir_info->fdir_filter_array);
1861 }
1862
1863 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1864 {
1865         /*
1866          * By default, disable flexible payload
1867          * for the corresponding L2/L3/L4 layers.
1868          */
1869         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1870         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1871         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1872 }
1873
1874 static int
1875 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1876 {
1877         struct i40e_hw *hw;
1878
1879         PMD_INIT_FUNC_TRACE();
1880
1881         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1882                 return 0;
1883
1884         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1885
1886         if (hw->adapter_closed == 0)
1887                 i40e_dev_close(dev);
1888
1889         return 0;
1890 }
1891
1892 static int
1893 i40e_dev_configure(struct rte_eth_dev *dev)
1894 {
1895         struct i40e_adapter *ad =
1896                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1897         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1898         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1899         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1900         int i, ret;
1901
1902         ret = i40e_dev_sync_phy_type(hw);
1903         if (ret)
1904                 return ret;
1905
1906         /* Initialize to TRUE. If any Rx queue doesn't meet the bulk
1907          * allocation or vector Rx preconditions, we will reset it.
1908          */
1909         ad->rx_bulk_alloc_allowed = true;
1910         ad->rx_vec_allowed = true;
1911         ad->tx_simple_allowed = true;
1912         ad->tx_vec_allowed = true;
1913
1914         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1915                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1916
1917         /* Only legacy filter API needs the following fdir config. So when the
1918          * legacy filter API is deprecated, the following code should also be
1919          * removed.
1920          */
1921         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1922                 ret = i40e_fdir_setup(pf);
1923                 if (ret != I40E_SUCCESS) {
1924                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1925                         return -ENOTSUP;
1926                 }
1927                 ret = i40e_fdir_configure(dev);
1928                 if (ret < 0) {
1929                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1930                         goto err;
1931                 }
1932         } else
1933                 i40e_fdir_teardown(pf);
1934
1935         ret = i40e_dev_init_vlan(dev);
1936         if (ret < 0)
1937                 goto err;
1938
1939         /* VMDQ setup.
1940          *  The general PMD call sequence is NIC init, configure,
1941          *  rx/tx_queue_setup and dev_start. In rx/tx_queue_setup(), the
1942          *  driver looks up the VSI that a specific queue belongs to when
1943          *  VMDQ is applicable, so VMDQ setup has to be done before
1944          *  rx/tx_queue_setup(), which makes this function a good place for
1945          *  vmdq_setup. RSS setup needs the actual number of configured Rx
1946          *  queues, which is only available after rx_queue_setup(), so
1947          *  dev_start() is a good place for the RSS setup.
1948          */
1949         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1950                 ret = i40e_vmdq_setup(dev);
1951                 if (ret)
1952                         goto err;
1953         }
1954
1955         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1956                 ret = i40e_dcb_setup(dev);
1957                 if (ret) {
1958                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1959                         goto err_dcb;
1960                 }
1961         }
1962
1963         TAILQ_INIT(&pf->flow_list);
1964
1965         return 0;
1966
1967 err_dcb:
1968         /* need to release vmdq resources if they exist */
1969         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1970                 i40e_vsi_release(pf->vmdq[i].vsi);
1971                 pf->vmdq[i].vsi = NULL;
1972         }
1973         rte_free(pf->vmdq);
1974         pf->vmdq = NULL;
1975 err:
1976         /* Need to release the fdir resource if it exists.
1977          * Only legacy filter API needs the following fdir config. So when the
1978          * legacy filter API is deprecated, the following code should also be
1979          * removed.
1980          */
1981         i40e_fdir_teardown(pf);
1982         return ret;
1983 }
1984
1985 void
1986 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1987 {
1988         struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
1989         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1990         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1991         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1992         uint16_t msix_vect = vsi->msix_intr;
1993         uint16_t i;
1994
1995         for (i = 0; i < vsi->nb_qps; i++) {
1996                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1997                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1998                 rte_wmb();
1999         }
2000
2001         if (vsi->type != I40E_VSI_SRIOV) {
2002                 if (!rte_intr_allow_others(intr_handle)) {
2003                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2004                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
2005                         I40E_WRITE_REG(hw,
2006                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2007                                        0);
2008                 } else {
2009                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2010                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
2011                         I40E_WRITE_REG(hw,
2012                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2013                                                        msix_vect - 1), 0);
2014                 }
2015         } else {
2016                 uint32_t reg;
2017                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2018                         vsi->user_param + (msix_vect - 1);
2019
2020                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2021                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
2022         }
2023         I40E_WRITE_FLUSH(hw);
2024 }
2025
2026 static void
2027 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
2028                        int base_queue, int nb_queue,
2029                        uint16_t itr_idx)
2030 {
2031         int i;
2032         uint32_t val;
2033         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2034         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2035
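        /*
         * Hardware chains interrupt causes in a linked list: each RQCTL
         * entry points to the following queue through NEXTQ_INDX, and
         * the last entry is terminated with the all-ones index below.
         */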
2036         /* Bind all RX queues to allocated MSIX interrupt */
2037         for (i = 0; i < nb_queue; i++) {
2038                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2039                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
2040                         ((base_queue + i + 1) <<
2041                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2042                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2043                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2044
2045                 if (i == nb_queue - 1)
2046                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
2047                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
2048         }
2049
2050         /* Write first RX queue to Link list register as the head element */
2051         if (vsi->type != I40E_VSI_SRIOV) {
2052                 uint16_t interval =
2053                         i40e_calc_itr_interval(1, pf->support_multi_driver);
2054
2055                 if (msix_vect == I40E_MISC_VEC_ID) {
2056                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2057                                        (base_queue <<
2058                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2059                                        (0x0 <<
2060                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2061                         I40E_WRITE_REG(hw,
2062                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2063                                        interval);
2064                 } else {
2065                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2066                                        (base_queue <<
2067                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2068                                        (0x0 <<
2069                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2070                         I40E_WRITE_REG(hw,
2071                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2072                                                        msix_vect - 1),
2073                                        interval);
2074                 }
2075         } else {
2076                 uint32_t reg;
2077
2078                 if (msix_vect == I40E_MISC_VEC_ID) {
2079                         I40E_WRITE_REG(hw,
2080                                        I40E_VPINT_LNKLST0(vsi->user_param),
2081                                        (base_queue <<
2082                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2083                                        (0x0 <<
2084                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2085                 } else {
2086                         /* num_msix_vectors_vf needs to exclude irq0 */
2087                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2088                                 vsi->user_param + (msix_vect - 1);
2089
2090                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2091                                        (base_queue <<
2092                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2093                                        (0x0 <<
2094                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2095                 }
2096         }
2097
2098         I40E_WRITE_FLUSH(hw);
2099 }
2100
2101 int
2102 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
2103 {
2104         struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2105         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2106         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2107         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2108         uint16_t msix_vect = vsi->msix_intr;
2109         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2110         uint16_t queue_idx = 0;
2111         int record = 0;
2112         int i;
2113
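        /* Clear any stale queue-to-vector mapping before rebinding. */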
2114         for (i = 0; i < vsi->nb_qps; i++) {
2115                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2116                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2117         }
2118
2119         /* VF bind interrupt */
2120         if (vsi->type == I40E_VSI_SRIOV) {
2121                 if (vsi->nb_msix == 0) {
2122                         PMD_DRV_LOG(ERR, "No msix resource");
2123                         return -EINVAL;
2124                 }
2125                 __vsi_queues_bind_intr(vsi, msix_vect,
2126                                        vsi->base_queue, vsi->nb_qps,
2127                                        itr_idx);
2128                 return 0;
2129         }
2130
2131         /* PF & VMDq bind interrupt */
2132         if (rte_intr_dp_is_en(intr_handle)) {
2133                 if (vsi->type == I40E_VSI_MAIN) {
2134                         queue_idx = 0;
2135                         record = 1;
2136                 } else if (vsi->type == I40E_VSI_VMDQ2) {
2137                         struct i40e_vsi *main_vsi =
2138                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2139                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
2140                         record = 1;
2141                 }
2142         }
2143
2144         for (i = 0; i < vsi->nb_used_qps; i++) {
2145                 if (vsi->nb_msix == 0) {
2146                         PMD_DRV_LOG(ERR, "No msix resource");
2147                         return -EINVAL;
2148                 } else if (nb_msix <= 1) {
2149                         if (!rte_intr_allow_others(intr_handle))
2150                                 /* allow sharing of MISC_VEC_ID */
2151                                 msix_vect = I40E_MISC_VEC_ID;
2152
2153                         /* not enough msix vectors, map all remaining queues to one */
2154                         __vsi_queues_bind_intr(vsi, msix_vect,
2155                                                vsi->base_queue + i,
2156                                                vsi->nb_used_qps - i,
2157                                                itr_idx);
2158                         for (; !!record && i < vsi->nb_used_qps; i++)
2159                                 intr_handle->intr_vec[queue_idx + i] =
2160                                         msix_vect;
2161                         break;
2162                 }
2163                 /* 1:1 queue/msix_vect mapping */
2164                 __vsi_queues_bind_intr(vsi, msix_vect,
2165                                        vsi->base_queue + i, 1,
2166                                        itr_idx);
2167                 if (!!record)
2168                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
2169
2170                 msix_vect++;
2171                 nb_msix--;
2172         }
2173
2174         return 0;
2175 }
2176
2177 void
2178 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2179 {
2180         struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2181         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2182         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2183         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2184         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2185         uint16_t msix_intr, i;
2186
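        /*
         * With per-queue vectors available (and exclusive device
         * ownership), enable each queue vector; otherwise only the
         * shared vector 0 register is written.
         */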
2187         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2188                 for (i = 0; i < vsi->nb_msix; i++) {
2189                         msix_intr = vsi->msix_intr + i;
2190                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2191                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
2192                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2193                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2194                 }
2195         else
2196                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2197                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
2198                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2199                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2200
2201         I40E_WRITE_FLUSH(hw);
2202 }
2203
2204 void
2205 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2206 {
2207         struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2208         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2209         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2210         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2211         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2212         uint16_t msix_intr, i;
2213
2214         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2215                 for (i = 0; i < vsi->nb_msix; i++) {
2216                         msix_intr = vsi->msix_intr + i;
2217                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2218                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2219                 }
2220         else
2221                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2222                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2223
2224         I40E_WRITE_FLUSH(hw);
2225 }
2226
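/*
 * Translate ethdev ETH_LINK_SPEED_* flags into the AdminQ
 * I40E_LINK_SPEED_* bitmap; flags not listed here are ignored.
 */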
2227 static inline uint8_t
2228 i40e_parse_link_speeds(uint16_t link_speeds)
2229 {
2230         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2231
2232         if (link_speeds & ETH_LINK_SPEED_40G)
2233                 link_speed |= I40E_LINK_SPEED_40GB;
2234         if (link_speeds & ETH_LINK_SPEED_25G)
2235                 link_speed |= I40E_LINK_SPEED_25GB;
2236         if (link_speeds & ETH_LINK_SPEED_20G)
2237                 link_speed |= I40E_LINK_SPEED_20GB;
2238         if (link_speeds & ETH_LINK_SPEED_10G)
2239                 link_speed |= I40E_LINK_SPEED_10GB;
2240         if (link_speeds & ETH_LINK_SPEED_1G)
2241                 link_speed |= I40E_LINK_SPEED_1GB;
2242         if (link_speeds & ETH_LINK_SPEED_100M)
2243                 link_speed |= I40E_LINK_SPEED_100MB;
2244
2245         return link_speed;
2246 }
2247
2248 static int
2249 i40e_phy_conf_link(struct i40e_hw *hw,
2250                    uint8_t abilities,
2251                    uint8_t force_speed,
2252                    bool is_up)
2253 {
2254         enum i40e_status_code status;
2255         struct i40e_aq_get_phy_abilities_resp phy_ab;
2256         struct i40e_aq_set_phy_config phy_conf;
2257         enum i40e_aq_phy_type cnt;
2258         uint8_t avail_speed;
2259         uint32_t phy_type_mask = 0;
2260
2261         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2262                         I40E_AQ_PHY_FLAG_PAUSE_RX |
2263                         I40E_AQ_PHY_FLAG_LOW_POWER;
2264
2265         int ret = -ENOTSUP;
2266
2267         /* Get the PHY capabilities to learn the available speeds. */
2268         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2269                                               NULL);
2270         if (status) {
2271                 PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2272                                 status);
2273                 return ret;
2274         }
2275         avail_speed = phy_ab.link_speed;
2276
2277         /* To get the current phy config. */
2278         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2279                                               NULL);
2280         if (status) {
2281                 PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2282                                 status);
2283                 return ret;
2284         }
2285
2286         /* If the link needs to go up, it is in autoneg mode, and the speed
2287          * is OK, there is no need to set it up again.
2288          */
2289         if (is_up && phy_ab.phy_type != 0 &&
2290                      abilities & I40E_AQ_PHY_AN_ENABLED &&
2291                      phy_ab.link_speed != 0)
2292                 return I40E_SUCCESS;
2293
2294         memset(&phy_conf, 0, sizeof(phy_conf));
2295
2296         /* bits 0-2 use the values from get_phy_abilities_resp */
2297         abilities &= ~mask;
2298         abilities |= phy_ab.abilities & mask;
2299
2300         phy_conf.abilities = abilities;
2301
2302         /* If the link needs to go up but the forced speed is not supported,
2303          * warn users and configure the default available speeds.
2304          */
2305         if (is_up && !(force_speed & avail_speed)) {
2306                 PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2307                 phy_conf.link_speed = avail_speed;
2308         } else {
2309                 phy_conf.link_speed = is_up ? force_speed : avail_speed;
2310         }
2311
2312         /* PHY type mask needs to include each type except PHY type extension */
2313         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2314                 phy_type_mask |= 1 << cnt;
2315
2316         /* use get_phy_abilities_resp value for the rest */
2317         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2318         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2319                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2320                 I40E_AQ_PHY_TYPE_EXT_25G_LR | I40E_AQ_PHY_TYPE_EXT_25G_AOC |
2321                 I40E_AQ_PHY_TYPE_EXT_25G_ACC) : 0;
2322         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2323         phy_conf.eee_capability = phy_ab.eee_capability;
2324         phy_conf.eeer = phy_ab.eeer_val;
2325         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2326
2327         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2328                     phy_ab.abilities, phy_ab.link_speed);
2329         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2330                     phy_conf.abilities, phy_conf.link_speed);
2331
2332         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2333         if (status)
2334                 return ret;
2335
2336         return I40E_SUCCESS;
2337 }
2338
2339 static int
2340 i40e_apply_link_speed(struct rte_eth_dev *dev)
2341 {
2342         uint8_t speed;
2343         uint8_t abilities = 0;
2344         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2345         struct rte_eth_conf *conf = &dev->data->dev_conf;
2346
2347         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2348                      I40E_AQ_PHY_LINK_ENABLED;
2349
2350         if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
2351                 conf->link_speeds = ETH_LINK_SPEED_40G |
2352                                     ETH_LINK_SPEED_25G |
2353                                     ETH_LINK_SPEED_20G |
2354                                     ETH_LINK_SPEED_10G |
2355                                     ETH_LINK_SPEED_1G |
2356                                     ETH_LINK_SPEED_100M;
2357
2358                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2359         } else {
2360                 abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2361         }
2362         speed = i40e_parse_link_speeds(conf->link_speeds);
2363
2364         return i40e_phy_conf_link(hw, abilities, speed, true);
2365 }
2366
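/*
 * Device start sequence: initialize the Rx/Tx rings, bind queue
 * interrupts, start all configured queues, apply link settings and
 * restore any filters that were added before start.
 */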
2367 static int
2368 i40e_dev_start(struct rte_eth_dev *dev)
2369 {
2370         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2371         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2372         struct i40e_vsi *main_vsi = pf->main_vsi;
2373         int ret, i;
2374         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2375         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2376         uint32_t intr_vector = 0;
2377         struct i40e_vsi *vsi;
2378         uint16_t nb_rxq, nb_txq;
2379
2380         hw->adapter_stopped = 0;
2381
2382         rte_intr_disable(intr_handle);
2383
2384         if ((rte_intr_cap_multiple(intr_handle) ||
2385              !RTE_ETH_DEV_SRIOV(dev).active) &&
2386             dev->data->dev_conf.intr_conf.rxq != 0) {
2387                 intr_vector = dev->data->nb_rx_queues;
2388                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2389                 if (ret)
2390                         return ret;
2391         }
2392
2393         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2394                 intr_handle->intr_vec =
2395                         rte_zmalloc("intr_vec",
2396                                     dev->data->nb_rx_queues * sizeof(int),
2397                                     0);
2398                 if (!intr_handle->intr_vec) {
2399                         PMD_INIT_LOG(ERR,
2400                                 "Failed to allocate %d rx_queues intr_vec",
2401                                 dev->data->nb_rx_queues);
2402                         return -ENOMEM;
2403                 }
2404         }
2405
2406         /* Initialize VSI */
2407         ret = i40e_dev_rxtx_init(pf);
2408         if (ret != I40E_SUCCESS) {
2409                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2410                 return ret;
2411         }
2412
2413         /* Map queues with MSIX interrupt */
2414         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2415                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2416         ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2417         if (ret < 0)
2418                 return ret;
2419         i40e_vsi_enable_queues_intr(main_vsi);
2420
2421         /* Map VMDQ VSI queues with MSIX interrupt */
2422         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2423                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2424                 ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2425                                                 I40E_ITR_INDEX_DEFAULT);
2426                 if (ret < 0)
2427                         return ret;
2428                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2429         }
2430
2431         /* Enable all queues which have been configured */
2432         for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
2433                 ret = i40e_dev_rx_queue_start(dev, nb_rxq);
2434                 if (ret)
2435                         goto rx_err;
2436         }
2437
2438         for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
2439                 ret = i40e_dev_tx_queue_start(dev, nb_txq);
2440                 if (ret)
2441                         goto tx_err;
2442         }
2443
2444         /* Enable receiving broadcast packets */
2445         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2446         if (ret != I40E_SUCCESS)
2447                 PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
2448
2449         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2450                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2451                                                 true, NULL);
2452                 if (ret != I40E_SUCCESS)
2453                         PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
2454         }
2455
2456         /* Enable the VLAN promiscuous mode. */
2457         if (pf->vfs) {
2458                 for (i = 0; i < pf->vf_num; i++) {
2459                         vsi = pf->vfs[i].vsi;
2460                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2461                                                      true, NULL);
2462                 }
2463         }
2464
2465         /* Enable mac loopback mode */
2466         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2467             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2468                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2469                 if (ret != I40E_SUCCESS) {
2470                         PMD_DRV_LOG(ERR, "failed to set loopback link");
2471                         goto tx_err;
2472                 }
2473         }
2474
2475         /* Apply link configure */
2476         ret = i40e_apply_link_speed(dev);
2477         if (I40E_SUCCESS != ret) {
2478                 PMD_DRV_LOG(ERR, "Failed to apply link setting");
2479                 goto tx_err;
2480         }
2481
2482         if (!rte_intr_allow_others(intr_handle)) {
2483                 rte_intr_callback_unregister(intr_handle,
2484                                              i40e_dev_interrupt_handler,
2485                                              (void *)dev);
2486                 /* configure and enable device interrupt */
2487                 i40e_pf_config_irq0(hw, FALSE);
2488                 i40e_pf_enable_irq0(hw);
2489
2490                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2491                         PMD_INIT_LOG(INFO,
2492                                 "lsc won't be enabled because of no intr multiplexing");
2493         } else {
2494                 ret = i40e_aq_set_phy_int_mask(hw,
2495                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2496                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2497                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2498                 if (ret != I40E_SUCCESS)
2499                         PMD_DRV_LOG(WARNING, "Failed to set phy mask");
2500
2501                 /* Call get_link_info aq command to enable/disable LSE */
2502                 i40e_dev_link_update(dev, 0);
2503         }
2504
2505         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2506                 rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2507                                   i40e_dev_alarm_handler, dev);
2508         } else {
2509                 /* enable uio intr after callback register */
2510                 rte_intr_enable(intr_handle);
2511         }
2512
2513         i40e_filter_restore(pf);
2514
2515         if (pf->tm_conf.root && !pf->tm_conf.committed)
2516                 PMD_DRV_LOG(WARNING,
2517                             "please call hierarchy_commit() "
2518                             "before starting the port");
2519
2520         return I40E_SUCCESS;
2521
2522 tx_err:
2523         for (i = 0; i < nb_txq; i++)
2524                 i40e_dev_tx_queue_stop(dev, i);
2525 rx_err:
2526         for (i = 0; i < nb_rxq; i++)
2527                 i40e_dev_rx_queue_stop(dev, i);
2528
2529         return ret;
2530 }
2531
2532 static int
2533 i40e_dev_stop(struct rte_eth_dev *dev)
2534 {
2535         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2536         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2537         struct i40e_vsi *main_vsi = pf->main_vsi;
2538         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2539         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2540         int i;
2541
2542         if (hw->adapter_stopped == 1)
2543                 return 0;
2544
2545         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2546                 rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2547                 rte_intr_enable(intr_handle);
2548         }
2549
2550         /* Disable all queues */
2551         for (i = 0; i < dev->data->nb_tx_queues; i++)
2552                 i40e_dev_tx_queue_stop(dev, i);
2553
2554         for (i = 0; i < dev->data->nb_rx_queues; i++)
2555                 i40e_dev_rx_queue_stop(dev, i);
2556
2557         /* unmap queues from their interrupt vectors */
2558         i40e_vsi_disable_queues_intr(main_vsi);
2559         i40e_vsi_queues_unbind_intr(main_vsi);
2560
2561         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2562                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2563                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2564         }
2565
2566         /* Clear all queues and release memory */
2567         i40e_dev_clear_queues(dev);
2568
2569         /* Set link down */
2570         i40e_dev_set_link_down(dev);
2571
2572         if (!rte_intr_allow_others(intr_handle))
2573                 /* restore the default interrupt handler */
2574                 rte_intr_callback_register(intr_handle,
2575                                            i40e_dev_interrupt_handler,
2576                                            (void *)dev);
2577
2578         /* Clean datapath event and queue/vec mapping */
2579         rte_intr_efd_disable(intr_handle);
2580         if (intr_handle->intr_vec) {
2581                 rte_free(intr_handle->intr_vec);
2582                 intr_handle->intr_vec = NULL;
2583         }
2584
2585         /* reset hierarchy commit */
2586         pf->tm_conf.committed = false;
2587
2588         hw->adapter_stopped = 1;
2589         dev->data->dev_started = 0;
2590
2591         pf->adapter->rss_reta_updated = 0;
2592
2593         return 0;
2594 }
2595
2596 static int
2597 i40e_dev_close(struct rte_eth_dev *dev)
2598 {
2599         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2600         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2601         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2602         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2603         struct i40e_mirror_rule *p_mirror;
2604         struct i40e_filter_control_settings settings;
2605         struct rte_flow *p_flow;
2606         uint32_t reg;
2607         int i;
2608         int ret;
2609         uint8_t aq_fail = 0;
2610         int retries = 0;
2611
2612         PMD_INIT_FUNC_TRACE();
2613         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2614                 return 0;
2615
2616         ret = rte_eth_switch_domain_free(pf->switch_domain_id);
2617         if (ret)
2618                 PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
2619
2621         ret = i40e_dev_stop(dev);
2622
2623         /* Remove all mirror rules */
2624         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2625                 ret = i40e_aq_del_mirror_rule(hw,
2626                                               pf->main_vsi->veb->seid,
2627                                               p_mirror->rule_type,
2628                                               p_mirror->entries,
2629                                               p_mirror->num_entries,
2630                                               p_mirror->id);
2631                 if (ret < 0)
2632                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2633                                     "status = %d, aq_err = %d.", ret,
2634                                     hw->aq.asq_last_status);
2635
2636                 /* remove mirror software resource anyway */
2637                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2638                 rte_free(p_mirror);
2639                 pf->nb_mirror_rule--;
2640         }
2641
2642         i40e_dev_free_queues(dev);
2643
2644         /* Disable interrupt */
2645         i40e_pf_disable_irq0(hw);
2646         rte_intr_disable(intr_handle);
2647
2648         /*
2649          * Only legacy filter API needs the following fdir config. So when the
2650          * legacy filter API is deprecated, the following code should also be
2651          * removed.
2652          */
2653         i40e_fdir_teardown(pf);
2654
2655         /* shutdown and destroy the HMC */
2656         i40e_shutdown_lan_hmc(hw);
2657
2658         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2659                 i40e_vsi_release(pf->vmdq[i].vsi);
2660                 pf->vmdq[i].vsi = NULL;
2661         }
2662         rte_free(pf->vmdq);
2663         pf->vmdq = NULL;
2664
2665         /* release all the existing VSIs and VEBs */
2666         i40e_vsi_release(pf->main_vsi);
2667
2668         /* shutdown the adminq */
2669         i40e_aq_queue_shutdown(hw, true);
2670         i40e_shutdown_adminq(hw);
2671
2672         i40e_res_pool_destroy(&pf->qp_pool);
2673         i40e_res_pool_destroy(&pf->msix_pool);
2674
2675         /* Disable flexible payload in global configuration */
2676         if (!pf->support_multi_driver)
2677                 i40e_flex_payload_reg_set_default(hw);
2678
2679         /* force a PF reset to clean anything leftover */
2680         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2681         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2682                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2683         I40E_WRITE_FLUSH(hw);
2684
2685         /* Clear PXE mode */
2686         i40e_clear_pxe_mode(hw);
2687
2688         /* Unconfigure filter control */
2689         memset(&settings, 0, sizeof(settings));
2690         ret = i40e_set_filter_control(hw, &settings);
2691         if (ret)
2692                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2693                                         ret);
2694
2695         /* Disable flow control */
2696         hw->fc.requested_mode = I40E_FC_NONE;
2697         i40e_set_fc(hw, &aq_fail, TRUE);
2698
2699         /* uninitialize pf host driver */
2700         i40e_pf_host_uninit(dev);
2701
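        /*
         * rte_intr_callback_unregister() returns -EAGAIN while the callback
         * is still executing; retry a few times with a delay before giving
         * up on unregistering it.
         */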
2702         do {
2703                 ret = rte_intr_callback_unregister(intr_handle,
2704                                 i40e_dev_interrupt_handler, dev);
2705                 if (ret >= 0 || ret == -ENOENT) {
2706                         break;
2707                 } else if (ret != -EAGAIN) {
2708                         PMD_INIT_LOG(ERR,
2709                                  "intr callback unregister failed: %d",
2710                                  ret);
2711                 }
2712                 i40e_msec_delay(500);
2713         } while (retries++ < 5);
2714
2715         i40e_rm_ethtype_filter_list(pf);
2716         i40e_rm_tunnel_filter_list(pf);
2717         i40e_rm_fdir_filter_list(pf);
2718
2719         /* Remove all flows */
2720         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
2721                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
2722                 /* Do not free FDIR flows since they are statically allocated */
2723                 if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
2724                         rte_free(p_flow);
2725         }
2726
2727         /* release the statically allocated fdir memory */
2728         i40e_fdir_memory_cleanup(pf);
2729
2730         /* Remove all Traffic Manager configuration */
2731         i40e_tm_conf_uninit(dev);
2732
2733         i40e_clear_automask(pf);
2734
2735         hw->adapter_closed = 1;
2736         return ret;
2737 }
2738
2739 /*
2740  * Reset PF device only to re-initialize resources in PMD layer
2741  */
2742 static int
2743 i40e_dev_reset(struct rte_eth_dev *dev)
2744 {
2745         int ret;
2746
2747         /* When a DPDK PMD PF begins to reset the PF port, it should notify
2748          * all of its VFs so that they stay in sync with it. The notification
2749          * mechanism is PMD specific and, for the i40e PF, rather complex.
2750          * To avoid unexpected behavior in the VFs, resetting the PF while
2751          * SR-IOV is active is currently not supported; it may be added later.
2752          */
2753         if (dev->data->sriov.active)
2754                 return -ENOTSUP;
2755
2756         ret = eth_i40e_dev_uninit(dev);
2757         if (ret)
2758                 return ret;
2759
2760         ret = eth_i40e_dev_init(dev, NULL);
2761
2762         return ret;
2763 }
2764
2765 static int
2766 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2767 {
2768         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2769         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2770         struct i40e_vsi *vsi = pf->main_vsi;
2771         int status;
2772
2773         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2774                                                      true, NULL, true);
2775         if (status != I40E_SUCCESS) {
2776                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2777                 return -EAGAIN;
2778         }
2779
2780         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2781                                                         TRUE, NULL);
2782         if (status != I40E_SUCCESS) {
2783                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2784                 /* Rollback unicast promiscuous mode */
2785                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2786                                                     false, NULL, true);
2787                 return -EAGAIN;
2788         }
2789
2790         return 0;
2791 }
2792
2793 static int
2794 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2795 {
2796         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2797         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2798         struct i40e_vsi *vsi = pf->main_vsi;
2799         int status;
2800
2801         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2802                                                      false, NULL, true);
2803         if (status != I40E_SUCCESS) {
2804                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2805                 return -EAGAIN;
2806         }
2807
2808         /* multicast promiscuous must stay enabled while all_multicast is on */
2809         if (dev->data->all_multicast == 1)
2810                 return 0;
2811
2812         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2813                                                         false, NULL);
2814         if (status != I40E_SUCCESS) {
2815                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2816                 /* Rollback unicast promiscuous mode */
2817                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2818                                                     true, NULL, true);
2819                 return -EAGAIN;
2820         }
2821
2822         return 0;
2823 }
2824
2825 static int
2826 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2827 {
2828         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2829         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2830         struct i40e_vsi *vsi = pf->main_vsi;
2831         int ret;
2832
2833         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2834         if (ret != I40E_SUCCESS) {
2835                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2836                 return -EAGAIN;
2837         }
2838
2839         return 0;
2840 }
2841
2842 static int
2843 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2844 {
2845         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2846         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2847         struct i40e_vsi *vsi = pf->main_vsi;
2848         int ret;
2849
2850         if (dev->data->promiscuous == 1)
2851                 return 0; /* promiscuous mode implies multicast promiscuous; keep it on */
2852
2853         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2854                                 vsi->seid, FALSE, NULL);
2855         if (ret != I40E_SUCCESS) {
2856                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2857                 return -EAGAIN;
2858         }
2859
2860         return 0;
2861 }
2862
2863 /*
2864  * Set device link up.
2865  */
2866 static int
2867 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2868 {
2869         /* re-apply link speed setting */
2870         return i40e_apply_link_speed(dev);
2871 }
2872
2873 /*
2874  * Set device link down.
2875  */
2876 static int
2877 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2878 {
2879         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2880         uint8_t abilities = 0;
2881         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2882
2883         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2884         return i40e_phy_conf_link(hw, abilities, speed, false);
2885 }
2886
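/*
 * Fast-path link status: decode the MAC link status register directly
 * instead of issuing an admin queue command.  The link speed is encoded
 * in bits 29:27 of I40E_PRTMAC_LINKSTA; the meaning of each encoding
 * differs between X722 and the other MAC types, as handled below.
 */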
2887 static __rte_always_inline void
2888 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2889 {
2890 /* Link status registers and values */
2891 #define I40E_PRTMAC_LINKSTA             0x001E2420
2892 #define I40E_REG_LINK_UP                0x40000080
2893 #define I40E_PRTMAC_MACC                0x001E24E0
2894 #define I40E_REG_MACC_25GB              0x00020000
2895 #define I40E_REG_SPEED_MASK             0x38000000
2896 #define I40E_REG_SPEED_0                0x00000000
2897 #define I40E_REG_SPEED_1                0x08000000
2898 #define I40E_REG_SPEED_2                0x10000000
2899 #define I40E_REG_SPEED_3                0x18000000
2900 #define I40E_REG_SPEED_4                0x20000000
2901         uint32_t link_speed;
2902         uint32_t reg_val;
2903
2904         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2905         link_speed = reg_val & I40E_REG_SPEED_MASK;
2906         reg_val &= I40E_REG_LINK_UP;
2907         link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2908
2909         if (unlikely(link->link_status == 0))
2910                 return;
2911
2912         /* Parse the link status */
2913         switch (link_speed) {
2914         case I40E_REG_SPEED_0:
2915                 link->link_speed = ETH_SPEED_NUM_100M;
2916                 break;
2917         case I40E_REG_SPEED_1:
2918                 link->link_speed = ETH_SPEED_NUM_1G;
2919                 break;
2920         case I40E_REG_SPEED_2:
2921                 if (hw->mac.type == I40E_MAC_X722)
2922                         link->link_speed = ETH_SPEED_NUM_2_5G;
2923                 else
2924                         link->link_speed = ETH_SPEED_NUM_10G;
2925                 break;
2926         case I40E_REG_SPEED_3:
2927                 if (hw->mac.type == I40E_MAC_X722) {
2928                         link->link_speed = ETH_SPEED_NUM_5G;
2929                 } else {
2930                         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2931
2932                         if (reg_val & I40E_REG_MACC_25GB)
2933                                 link->link_speed = ETH_SPEED_NUM_25G;
2934                         else
2935                                 link->link_speed = ETH_SPEED_NUM_40G;
2936                 }
2937                 break;
2938         case I40E_REG_SPEED_4:
2939                 if (hw->mac.type == I40E_MAC_X722)
2940                         link->link_speed = ETH_SPEED_NUM_10G;
2941                 else
2942                         link->link_speed = ETH_SPEED_NUM_20G;
2943                 break;
2944         default:
2945                 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2946                 break;
2947         }
2948 }
2949
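/*
 * Slow-path link status: query the firmware through the admin queue.
 * With wait_to_complete set, poll up to MAX_REPEAT_TIME times at
 * CHECK_INTERVAL ms (1 s total) until the link reports up.
 */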
2950 static __rte_always_inline void
2951 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2952         bool enable_lse, int wait_to_complete)
2953 {
2954 #define CHECK_INTERVAL             100  /* 100ms */
2955 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2956         uint32_t rep_cnt = MAX_REPEAT_TIME;
2957         struct i40e_link_status link_status;
2958         int status;
2959
2960         memset(&link_status, 0, sizeof(link_status));
2961
2962         do {
2963                 memset(&link_status, 0, sizeof(link_status));
2964
2965                 /* Get link status information from hardware */
2966                 status = i40e_aq_get_link_info(hw, enable_lse,
2967                                                 &link_status, NULL);
2968                 if (unlikely(status != I40E_SUCCESS)) {
2969                         link->link_speed = ETH_SPEED_NUM_NONE;
2970                         link->link_duplex = ETH_LINK_FULL_DUPLEX;
2971                         PMD_DRV_LOG(ERR, "Failed to get link info");
2972                         return;
2973                 }
2974
2975                 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2976                 if (!wait_to_complete || link->link_status)
2977                         break;
2978
2979                 rte_delay_ms(CHECK_INTERVAL);
2980         } while (--rep_cnt);
2981
2982         /* Parse the link status */
2983         switch (link_status.link_speed) {
2984         case I40E_LINK_SPEED_100MB:
2985                 link->link_speed = ETH_SPEED_NUM_100M;
2986                 break;
2987         case I40E_LINK_SPEED_1GB:
2988                 link->link_speed = ETH_SPEED_NUM_1G;
2989                 break;
2990         case I40E_LINK_SPEED_10GB:
2991                 link->link_speed = ETH_SPEED_NUM_10G;
2992                 break;
2993         case I40E_LINK_SPEED_20GB:
2994                 link->link_speed = ETH_SPEED_NUM_20G;
2995                 break;
2996         case I40E_LINK_SPEED_25GB:
2997                 link->link_speed = ETH_SPEED_NUM_25G;
2998                 break;
2999         case I40E_LINK_SPEED_40GB:
3000                 link->link_speed = ETH_SPEED_NUM_40G;
3001                 break;
3002         default:
3003                 if (link->link_status)
3004                         link->link_speed = ETH_SPEED_NUM_UNKNOWN;
3005                 else
3006                         link->link_speed = ETH_SPEED_NUM_NONE;
3007                 break;
3008         }
3009 }
3010
3011 int
3012 i40e_dev_link_update(struct rte_eth_dev *dev,
3013                      int wait_to_complete)
3014 {
3015         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3016         struct rte_eth_link link;
3017         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3018         int ret;
3019
3020         memset(&link, 0, sizeof(link));
3021
3022         /* i40e uses full duplex only */
3023         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3024         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3025                         ETH_LINK_SPEED_FIXED);
3026
3027         if (!wait_to_complete && !enable_lse)
3028                 update_link_reg(hw, &link);
3029         else
3030                 update_link_aq(hw, &link, enable_lse, wait_to_complete);
3031
3032         if (hw->switch_dev)
3033                 rte_eth_linkstatus_get(hw->switch_dev, &link);
3034
3035         ret = rte_eth_linkstatus_set(dev, &link);
3036         i40e_notify_all_vfs_link_status(dev);
3037
3038         return ret;
3039 }
3040
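/*
 * The i40e byte counters are 48 bits wide and wrap silently.  This wrapper
 * extends i40e_stat_update_48() to 64 bits by detecting rollover against
 * the previous sample and carrying the upper 16 bits forward: e.g. if the
 * previous low-48-bit value was 0xFFFFFFFFFFFE and the new reading is 0x1,
 * a wrap is assumed and 2^48 is added to the accumulated count.
 */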
3041 static void
3042 i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
3043                           uint32_t loreg, bool offset_loaded, uint64_t *offset,
3044                           uint64_t *stat, uint64_t *prev_stat)
3045 {
3046         i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
3047         /* extend the 48-bit counter to 64 bits, accounting for rollover */
3048         if (offset_loaded) {
3049                 if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
3050                         *stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
3051                 *stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
3052         }
3053         *prev_stat = *stat;
3054 }
3055
3056 /* Get all the statistics of a VSI */
3057 void
3058 i40e_update_vsi_stats(struct i40e_vsi *vsi)
3059 {
3060         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
3061         struct i40e_eth_stats *nes = &vsi->eth_stats;
3062         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3063         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
3064
3065         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
3066                                   vsi->offset_loaded, &oes->rx_bytes,
3067                                   &nes->rx_bytes, &vsi->prev_rx_bytes);
3068         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
3069                             vsi->offset_loaded, &oes->rx_unicast,
3070                             &nes->rx_unicast);
3071         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
3072                             vsi->offset_loaded, &oes->rx_multicast,
3073                             &nes->rx_multicast);
3074         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
3075                             vsi->offset_loaded, &oes->rx_broadcast,
3076                             &nes->rx_broadcast);
3077         /* exclude CRC bytes */
3078         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
3079                 nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
3080
3081         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
3082                             &oes->rx_discards, &nes->rx_discards);
3083         /* GLV_REPC not supported */
3084         /* GLV_RMPC not supported */
3085         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
3086                             &oes->rx_unknown_protocol,
3087                             &nes->rx_unknown_protocol);
3088         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
3089                                   vsi->offset_loaded, &oes->tx_bytes,
3090                                   &nes->tx_bytes, &vsi->prev_tx_bytes);
3091         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
3092                             vsi->offset_loaded, &oes->tx_unicast,
3093                             &nes->tx_unicast);
3094         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
3095                             vsi->offset_loaded, &oes->tx_multicast,
3096                             &nes->tx_multicast);
3097         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
3098                             vsi->offset_loaded,  &oes->tx_broadcast,
3099                             &nes->tx_broadcast);
3100         /* GLV_TDPC not supported */
3101         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
3102                             &oes->tx_errors, &nes->tx_errors);
3103         vsi->offset_loaded = true;
3104
3105         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
3106                     vsi->vsi_id);
3107         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
3108         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
3109         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
3110         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
3111         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
3112         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3113                     nes->rx_unknown_protocol);
3114         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
3115         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
3116         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
3117         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
3118         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
3119         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
3120         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
3121                     vsi->vsi_id);
3122 }
3123
3124 static void
3125 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
3126 {
3127         unsigned int i;
3128         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3129         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
3130
3131         /* Get rx/tx bytes of internal transfer packets */
3132         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
3133                                   I40E_GLV_GORCL(hw->port),
3134                                   pf->offset_loaded,
3135                                   &pf->internal_stats_offset.rx_bytes,
3136                                   &pf->internal_stats.rx_bytes,
3137                                   &pf->internal_prev_rx_bytes);
3138         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
3139                                   I40E_GLV_GOTCL(hw->port),
3140                                   pf->offset_loaded,
3141                                   &pf->internal_stats_offset.tx_bytes,
3142                                   &pf->internal_stats.tx_bytes,
3143                                   &pf->internal_prev_tx_bytes);
3144         /* Get total internal rx packet count */
3145         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
3146                             I40E_GLV_UPRCL(hw->port),
3147                             pf->offset_loaded,
3148                             &pf->internal_stats_offset.rx_unicast,
3149                             &pf->internal_stats.rx_unicast);
3150         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
3151                             I40E_GLV_MPRCL(hw->port),
3152                             pf->offset_loaded,
3153                             &pf->internal_stats_offset.rx_multicast,
3154                             &pf->internal_stats.rx_multicast);
3155         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
3156                             I40E_GLV_BPRCL(hw->port),
3157                             pf->offset_loaded,
3158                             &pf->internal_stats_offset.rx_broadcast,
3159                             &pf->internal_stats.rx_broadcast);
3160         /* Get total internal tx packet count */
3161         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
3162                             I40E_GLV_UPTCL(hw->port),
3163                             pf->offset_loaded,
3164                             &pf->internal_stats_offset.tx_unicast,
3165                             &pf->internal_stats.tx_unicast);
3166         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
3167                             I40E_GLV_MPTCL(hw->port),
3168                             pf->offset_loaded,
3169                             &pf->internal_stats_offset.tx_multicast,
3170                             &pf->internal_stats.tx_multicast);
3171         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
3172                             I40E_GLV_BPTCL(hw->port),
3173                             pf->offset_loaded,
3174                             &pf->internal_stats_offset.tx_broadcast,
3175                             &pf->internal_stats.tx_broadcast);
3176
3177         /* exclude CRC size */
3178         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
3179                 pf->internal_stats.rx_multicast +
3180                 pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
3181
3182         /* Get statistics of struct i40e_eth_stats */
3183         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
3184                                   I40E_GLPRT_GORCL(hw->port),
3185                                   pf->offset_loaded, &os->eth.rx_bytes,
3186                                   &ns->eth.rx_bytes, &pf->prev_rx_bytes);
3187         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
3188                             I40E_GLPRT_UPRCL(hw->port),
3189                             pf->offset_loaded, &os->eth.rx_unicast,
3190                             &ns->eth.rx_unicast);
3191         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
3192                             I40E_GLPRT_MPRCL(hw->port),
3193                             pf->offset_loaded, &os->eth.rx_multicast,
3194                             &ns->eth.rx_multicast);
3195         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
3196                             I40E_GLPRT_BPRCL(hw->port),
3197                             pf->offset_loaded, &os->eth.rx_broadcast,
3198                             &ns->eth.rx_broadcast);
3199         /* Workaround: CRC size should not be included in byte statistics,
3200          * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
3201          * packet.
3202          */
3203         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3204                 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3205
3206         /* Exclude internal rx bytes.
3207          * Workaround: I40E_GLV_GORC[H/L] may be updated before
3208          * I40E_GLPRT_GORC[H/L], leaving a small window that can produce a
3209          * negative value. The same applies to I40E_GLV_UPRC[H/L],
3210          * I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
3211          */
3212         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
3213                 ns->eth.rx_bytes = 0;
3214         else
3215                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
3216
3217         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
3218                 ns->eth.rx_unicast = 0;
3219         else
3220                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
3221
3222         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
3223                 ns->eth.rx_multicast = 0;
3224         else
3225                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
3226
3227         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
3228                 ns->eth.rx_broadcast = 0;
3229         else
3230                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
3231
3232         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
3233                             pf->offset_loaded, &os->eth.rx_discards,
3234                             &ns->eth.rx_discards);
3235         /* GLPRT_REPC not supported */
3236         /* GLPRT_RMPC not supported */
3237         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
3238                             pf->offset_loaded,
3239                             &os->eth.rx_unknown_protocol,
3240                             &ns->eth.rx_unknown_protocol);
3241         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
3242                                   I40E_GLPRT_GOTCL(hw->port),
3243                                   pf->offset_loaded, &os->eth.tx_bytes,
3244                                   &ns->eth.tx_bytes, &pf->prev_tx_bytes);
3245         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
3246                             I40E_GLPRT_UPTCL(hw->port),
3247                             pf->offset_loaded, &os->eth.tx_unicast,
3248                             &ns->eth.tx_unicast);
3249         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
3250                             I40E_GLPRT_MPTCL(hw->port),
3251                             pf->offset_loaded, &os->eth.tx_multicast,
3252                             &ns->eth.tx_multicast);
3253         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3254                             I40E_GLPRT_BPTCL(hw->port),
3255                             pf->offset_loaded, &os->eth.tx_broadcast,
3256                             &ns->eth.tx_broadcast);
3257         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3258                 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
3259
3260         /* Exclude internal tx bytes.
3261          * Workaround: I40E_GLV_GOTC[H/L] may be updated before
3262          * I40E_GLPRT_GOTC[H/L], leaving a small window that can produce a
3263          * negative value. The same applies to I40E_GLV_UPTC[H/L],
3264          * I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
3265          */
3266         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
3267                 ns->eth.tx_bytes = 0;
3268         else
3269                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
3270
3271         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
3272                 ns->eth.tx_unicast = 0;
3273         else
3274                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
3275
3276         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
3277                 ns->eth.tx_multicast = 0;
3278         else
3279                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
3280
3281         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
3282                 ns->eth.tx_broadcast = 0;
3283         else
3284                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
3285
3286         /* GLPRT_TEPC not supported */
3287
3288         /* additional port specific stats */
3289         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3290                             pf->offset_loaded, &os->tx_dropped_link_down,
3291                             &ns->tx_dropped_link_down);
3292         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3293                             pf->offset_loaded, &os->crc_errors,
3294                             &ns->crc_errors);
3295         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3296                             pf->offset_loaded, &os->illegal_bytes,
3297                             &ns->illegal_bytes);
3298         /* GLPRT_ERRBC not supported */
3299         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3300                             pf->offset_loaded, &os->mac_local_faults,
3301                             &ns->mac_local_faults);
3302         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3303                             pf->offset_loaded, &os->mac_remote_faults,
3304                             &ns->mac_remote_faults);
3305         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3306                             pf->offset_loaded, &os->rx_length_errors,
3307                             &ns->rx_length_errors);
3308         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3309                             pf->offset_loaded, &os->link_xon_rx,
3310                             &ns->link_xon_rx);
3311         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3312                             pf->offset_loaded, &os->link_xoff_rx,
3313                             &ns->link_xoff_rx);
3314         for (i = 0; i < 8; i++) {
3315                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3316                                     pf->offset_loaded,
3317                                     &os->priority_xon_rx[i],
3318                                     &ns->priority_xon_rx[i]);
3319                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3320                                     pf->offset_loaded,
3321                                     &os->priority_xoff_rx[i],
3322                                     &ns->priority_xoff_rx[i]);
3323         }
3324         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3325                             pf->offset_loaded, &os->link_xon_tx,
3326                             &ns->link_xon_tx);
3327         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3328                             pf->offset_loaded, &os->link_xoff_tx,
3329                             &ns->link_xoff_tx);
3330         for (i = 0; i < 8; i++) {
3331                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3332                                     pf->offset_loaded,
3333                                     &os->priority_xon_tx[i],
3334                                     &ns->priority_xon_tx[i]);
3335                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3336                                     pf->offset_loaded,
3337                                     &os->priority_xoff_tx[i],
3338                                     &ns->priority_xoff_tx[i]);
3339                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3340                                     pf->offset_loaded,
3341                                     &os->priority_xon_2_xoff[i],
3342                                     &ns->priority_xon_2_xoff[i]);
3343         }
3344         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3345                             I40E_GLPRT_PRC64L(hw->port),
3346                             pf->offset_loaded, &os->rx_size_64,
3347                             &ns->rx_size_64);
3348         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3349                             I40E_GLPRT_PRC127L(hw->port),
3350                             pf->offset_loaded, &os->rx_size_127,
3351                             &ns->rx_size_127);
3352         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3353                             I40E_GLPRT_PRC255L(hw->port),
3354                             pf->offset_loaded, &os->rx_size_255,
3355                             &ns->rx_size_255);
3356         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3357                             I40E_GLPRT_PRC511L(hw->port),
3358                             pf->offset_loaded, &os->rx_size_511,
3359                             &ns->rx_size_511);
3360         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3361                             I40E_GLPRT_PRC1023L(hw->port),
3362                             pf->offset_loaded, &os->rx_size_1023,
3363                             &ns->rx_size_1023);
3364         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3365                             I40E_GLPRT_PRC1522L(hw->port),
3366                             pf->offset_loaded, &os->rx_size_1522,
3367                             &ns->rx_size_1522);
3368         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3369                             I40E_GLPRT_PRC9522L(hw->port),
3370                             pf->offset_loaded, &os->rx_size_big,
3371                             &ns->rx_size_big);
3372         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3373                             pf->offset_loaded, &os->rx_undersize,
3374                             &ns->rx_undersize);
3375         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3376                             pf->offset_loaded, &os->rx_fragments,
3377                             &ns->rx_fragments);
3378         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3379                             pf->offset_loaded, &os->rx_oversize,
3380                             &ns->rx_oversize);
3381         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3382                             pf->offset_loaded, &os->rx_jabber,
3383                             &ns->rx_jabber);
3384         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3385                             I40E_GLPRT_PTC64L(hw->port),
3386                             pf->offset_loaded, &os->tx_size_64,
3387                             &ns->tx_size_64);
3388         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3389                             I40E_GLPRT_PTC127L(hw->port),
3390                             pf->offset_loaded, &os->tx_size_127,
3391                             &ns->tx_size_127);
3392         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3393                             I40E_GLPRT_PTC255L(hw->port),
3394                             pf->offset_loaded, &os->tx_size_255,
3395                             &ns->tx_size_255);
3396         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3397                             I40E_GLPRT_PTC511L(hw->port),
3398                             pf->offset_loaded, &os->tx_size_511,
3399                             &ns->tx_size_511);
3400         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3401                             I40E_GLPRT_PTC1023L(hw->port),
3402                             pf->offset_loaded, &os->tx_size_1023,
3403                             &ns->tx_size_1023);
3404         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3405                             I40E_GLPRT_PTC1522L(hw->port),
3406                             pf->offset_loaded, &os->tx_size_1522,
3407                             &ns->tx_size_1522);
3408         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3409                             I40E_GLPRT_PTC9522L(hw->port),
3410                             pf->offset_loaded, &os->tx_size_big,
3411                             &ns->tx_size_big);
3412         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3413                            pf->offset_loaded,
3414                            &os->fd_sb_match, &ns->fd_sb_match);
3415         /* GLPRT_MSPDC not supported */
3416         /* GLPRT_XEC not supported */
3417
3418         pf->offset_loaded = true;
3419
3420         if (pf->main_vsi)
3421                 i40e_update_vsi_stats(pf->main_vsi);
3422 }
3423
3424 /* Get all statistics of a port */
3425 static int
3426 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3427 {
3428         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3429         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3430         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3431         struct i40e_vsi *vsi;
3432         unsigned i;
3433
3434         /* read the hardware stats registers; this refreshes pf->stats and the VSI stats */
3435         i40e_read_stats_registers(pf, hw);
3436
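        /*
         * Rx packet and byte counters come from the per-VSI statistics
         * (main VSI here, VF VSIs added below) rather than the port-level
         * counters, so only traffic belonging to this port's VSIs is
         * reported.
         */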
3437         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3438                         pf->main_vsi->eth_stats.rx_multicast +
3439                         pf->main_vsi->eth_stats.rx_broadcast -
3440                         pf->main_vsi->eth_stats.rx_discards;
3441         stats->opackets = ns->eth.tx_unicast +
3442                         ns->eth.tx_multicast +
3443                         ns->eth.tx_broadcast;
3444         stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3445         stats->obytes   = ns->eth.tx_bytes;
3446         stats->oerrors  = ns->eth.tx_errors +
3447                         pf->main_vsi->eth_stats.tx_errors;
3448
3449         /* Rx Errors */
3450         stats->imissed  = ns->eth.rx_discards +
3451                         pf->main_vsi->eth_stats.rx_discards;
3452         stats->ierrors  = ns->crc_errors +
3453                         ns->rx_length_errors + ns->rx_undersize +
3454                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3455
3456         if (pf->vfs) {
3457                 for (i = 0; i < pf->vf_num; i++) {
3458                         vsi = pf->vfs[i].vsi;
3459                         i40e_update_vsi_stats(vsi);
3460
3461                         stats->ipackets += (vsi->eth_stats.rx_unicast +
3462                                         vsi->eth_stats.rx_multicast +
3463                                         vsi->eth_stats.rx_broadcast -
3464                                         vsi->eth_stats.rx_discards);
3465                         stats->ibytes   += vsi->eth_stats.rx_bytes;
3466                         stats->oerrors  += vsi->eth_stats.tx_errors;
3467                         stats->imissed  += vsi->eth_stats.rx_discards;
3468                 }
3469         }
3470
3471         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3472         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3473         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3474         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3475         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3476         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3477         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3478                     ns->eth.rx_unknown_protocol);
3479         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3480         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3481         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3482         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3483         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3484         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3485
3486         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3487                     ns->tx_dropped_link_down);
3488         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3489         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3490                     ns->illegal_bytes);
3491         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3492         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3493                     ns->mac_local_faults);
3494         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3495                     ns->mac_remote_faults);
3496         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3497                     ns->rx_length_errors);
3498         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3499         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3500         for (i = 0; i < 8; i++) {
3501                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3502                                 i, ns->priority_xon_rx[i]);
3503                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3504                                 i, ns->priority_xoff_rx[i]);
3505         }
3506         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3507         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3508         for (i = 0; i < 8; i++) {
3509                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3510                                 i, ns->priority_xon_tx[i]);
3511                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3512                                 i, ns->priority_xoff_tx[i]);
3513                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3514                                 i, ns->priority_xon_2_xoff[i]);
3515         }
3516         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3517         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3518         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3519         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3520         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3521         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3522         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3523         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3524         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3525         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3526         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3527         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3528         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3529         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3530         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3531         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3532         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3533         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3534         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3535                         ns->mac_short_packet_dropped);
3536         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3537                     ns->checksum_error);
3538         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3539         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3540         return 0;
3541 }
3542
3543 /* Reset the statistics */
3544 static int
3545 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3546 {
3547         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3548         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3549
3550         /* Mark PF and VSI stats to update the offset, aka "reset" */
3551         pf->offset_loaded = false;
3552         if (pf->main_vsi)
3553                 pf->main_vsi->offset_loaded = false;
3554
3555         /* read the stats; the current register values become the new offsets */
3556         i40e_read_stats_registers(pf, hw);
3557
3558         return 0;
3559 }
3560
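/*
 * Total number of xstats: the basic eth stats, the hw port stats, and the
 * per-priority Rx/Tx stats repeated for each of the 8 traffic classes.
 */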
3561 static uint32_t
3562 i40e_xstats_calc_num(void)
3563 {
3564         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3565                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3566                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3567 }
3568
3569 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3570                                      struct rte_eth_xstat_name *xstats_names,
3571                                      __rte_unused unsigned limit)
3572 {
3573         unsigned count = 0;
3574         unsigned i, prio;
3575
3576         if (xstats_names == NULL)
3577                 return i40e_xstats_calc_num();
3578
3579         /* Note: limit checked in rte_eth_xstats_names() */
3580
3581         /* Get stats from i40e_eth_stats struct */
3582         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3583                 strlcpy(xstats_names[count].name,
3584                         rte_i40e_stats_strings[i].name,
3585                         sizeof(xstats_names[count].name));
3586                 count++;
3587         }
3588
3589         /* Get individual stats from the i40e_hw_port struct */
3590         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3591                 strlcpy(xstats_names[count].name,
3592                         rte_i40e_hw_port_strings[i].name,
3593                         sizeof(xstats_names[count].name));
3594                 count++;
3595         }
3596
3597         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3598                 for (prio = 0; prio < 8; prio++) {
3599                         snprintf(xstats_names[count].name,
3600                                  sizeof(xstats_names[count].name),
3601                                  "rx_priority%u_%s", prio,
3602                                  rte_i40e_rxq_prio_strings[i].name);
3603                         count++;
3604                 }
3605         }
3606
3607         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3608                 for (prio = 0; prio < 8; prio++) {
3609                         snprintf(xstats_names[count].name,
3610                                  sizeof(xstats_names[count].name),
3611                                  "tx_priority%u_%s", prio,
3612                                  rte_i40e_txq_prio_strings[i].name);
3613                         count++;
3614                 }
3615         }
3616         return count;
3617 }
3618
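/*
 * Values must be emitted in the same order as the names reported by
 * i40e_dev_xstats_get_names(): eth stats, hw port stats, then per-priority
 * Rx and Tx stats.  Each value is read by adding the byte offset from the
 * corresponding strings table to the base of the stats structure.
 */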
3619 static int
3620 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3621                     unsigned n)
3622 {
3623         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3624         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3625         unsigned i, count, prio;
3626         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3627
3628         count = i40e_xstats_calc_num();
3629         if (n < count)
3630                 return count;
3631
3632         i40e_read_stats_registers(pf, hw);
3633
3634         if (xstats == NULL)
3635                 return 0;
3636
3637         count = 0;
3638
3639         /* Get stats from i40e_eth_stats struct */
3640         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3641                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3642                         rte_i40e_stats_strings[i].offset);
3643                 xstats[count].id = count;
3644                 count++;
3645         }
3646
3647         /* Get individual stats from the i40e_hw_port struct */
3648         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3649                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3650                         rte_i40e_hw_port_strings[i].offset);
3651                 xstats[count].id = count;
3652                 count++;
3653         }
3654
3655         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3656                 for (prio = 0; prio < 8; prio++) {
3657                         xstats[count].value =
3658                                 *(uint64_t *)(((char *)hw_stats) +
3659                                 rte_i40e_rxq_prio_strings[i].offset +
3660                                 (sizeof(uint64_t) * prio));
3661                         xstats[count].id = count;
3662                         count++;
3663                 }
3664         }
3665
3666         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3667                 for (prio = 0; prio < 8; prio++) {
3668                         xstats[count].value =
3669                                 *(uint64_t *)(((char *)hw_stats) +
3670                                 rte_i40e_txq_prio_strings[i].offset +
3671                                 (sizeof(uint64_t) * prio));
3672                         xstats[count].id = count;
3673                         count++;
3674                 }
3675         }
3676
3677         return count;
3678 }
3679
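/*
 * hw->nvm.oem_ver packs the OEM version into one 32-bit word: bits [31:24]
 * major, [23:8] build, [7:0] patch.  The reported string is the NVM map
 * version, the EETRACK id and the decoded OEM version.
 */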
3680 static int
3681 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3682 {
3683         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3684         u32 full_ver;
3685         u8 ver, patch;
3686         u16 build;
3687         int ret;
3688
3689         full_ver = hw->nvm.oem_ver;
3690         ver = (u8)(full_ver >> 24);
3691         build = (u16)((full_ver >> 8) & 0xffff);
3692         patch = (u8)(full_ver & 0xff);
3693
3694         ret = snprintf(fw_version, fw_size,
3695                  "%d.%d%d 0x%08x %d.%d.%d",
3696                  ((hw->nvm.version >> 12) & 0xf),
3697                  ((hw->nvm.version >> 4) & 0xff),
3698                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3699                  ver, build, patch);
3700         if (ret < 0)
3701                 return -EINVAL;
3702
3703         ret += 1; /* add the size of '\0' */
3704         if (fw_size < (size_t)ret)
3705                 return ret;
3706         else
3707                 return 0;
3708 }
3709
3710 /*
3711  * When using NVM 6.01 (for X710/XL710/XXV710) or 3.33 (for X722) or later,
3712  * the Rx data path does not hang if FW LLDP is stopped.
3713  * Return true if LLDP needs to be stopped;
3714  * return false if LLDP cannot be disabled without blocking the Rx data path.
3715  */
3716 static bool
3717 i40e_need_stop_lldp(struct rte_eth_dev *dev)
3718 {
3719         double nvm_ver;
3720         char ver_str[64] = {0};
3721         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3722
3723         i40e_fw_version_get(dev, ver_str, 64);
3724         nvm_ver = atof(ver_str);
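        /*
         * Compare versions in units of 1/1000 to avoid a direct
         * floating-point comparison: e.g. NVM 6.01 becomes 6010.
         */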
3725         if ((hw->mac.type == I40E_MAC_X722 ||
3726              hw->mac.type == I40E_MAC_X722_VF) &&
3727              ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3728                 return true;
3729         else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3730                 return true;
3731
3732         return false;
3733 }
3734
3735 static int
3736 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3737 {
3738         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3739         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3740         struct i40e_vsi *vsi = pf->main_vsi;
3741         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3742
3743         dev_info->max_rx_queues = vsi->nb_qps;
3744         dev_info->max_tx_queues = vsi->nb_qps;
3745         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3746         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3747         dev_info->max_mac_addrs = vsi->max_macaddrs;
3748         dev_info->max_vfs = pci_dev->max_vfs;
3749         dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
3750         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3751         dev_info->rx_queue_offload_capa = 0;
3752         dev_info->rx_offload_capa =
3753                 DEV_RX_OFFLOAD_VLAN_STRIP |
3754                 DEV_RX_OFFLOAD_QINQ_STRIP |
3755                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3756                 DEV_RX_OFFLOAD_UDP_CKSUM |
3757                 DEV_RX_OFFLOAD_TCP_CKSUM |
3758                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3759                 DEV_RX_OFFLOAD_KEEP_CRC |
3760                 DEV_RX_OFFLOAD_SCATTER |
3761                 DEV_RX_OFFLOAD_VLAN_EXTEND |
3762                 DEV_RX_OFFLOAD_VLAN_FILTER |
3763                 DEV_RX_OFFLOAD_JUMBO_FRAME |
3764                 DEV_RX_OFFLOAD_RSS_HASH;
3765
3766         dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3767         dev_info->tx_offload_capa =
3768                 DEV_TX_OFFLOAD_VLAN_INSERT |
3769                 DEV_TX_OFFLOAD_QINQ_INSERT |
3770                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3771                 DEV_TX_OFFLOAD_UDP_CKSUM |
3772                 DEV_TX_OFFLOAD_TCP_CKSUM |
3773                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3774                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3775                 DEV_TX_OFFLOAD_TCP_TSO |
3776                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3777                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3778                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3779                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3780                 DEV_TX_OFFLOAD_MULTI_SEGS |
3781                 dev_info->tx_queue_offload_capa;
3782         dev_info->dev_capa =
3783                 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3784                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3785
3786         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3787                                                 sizeof(uint32_t);
3788         dev_info->reta_size = pf->hash_lut_size;
3789         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3790
3791         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3792                 .rx_thresh = {
3793                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3794                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3795                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3796                 },
3797                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3798                 .rx_drop_en = 0,
3799                 .offloads = 0,
3800         };
3801
3802         dev_info->default_txconf = (struct rte_eth_txconf) {
3803                 .tx_thresh = {
3804                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3805                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3806                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3807                 },
3808                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3809                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3810                 .offloads = 0,
3811         };
3812
3813         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3814                 .nb_max = I40E_MAX_RING_DESC,
3815                 .nb_min = I40E_MIN_RING_DESC,
3816                 .nb_align = I40E_ALIGN_RING_DESC,
3817         };
3818
3819         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3820                 .nb_max = I40E_MAX_RING_DESC,
3821                 .nb_min = I40E_MIN_RING_DESC,
3822                 .nb_align = I40E_ALIGN_RING_DESC,
3823                 .nb_seg_max = I40E_TX_MAX_SEG,
3824                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3825         };
3826
3827         if (pf->flags & I40E_FLAG_VMDQ) {
3828                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3829                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3830                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3831                                                 pf->max_nb_vmdq_vsi;
3832                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3833                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3834                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3835         }
3836
3837         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3838                 /* For XL710 */
3839                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3840                 dev_info->default_rxportconf.nb_queues = 2;
3841                 dev_info->default_txportconf.nb_queues = 2;
3842                 if (dev->data->nb_rx_queues == 1)
3843                         dev_info->default_rxportconf.ring_size = 2048;
3844                 else
3845                         dev_info->default_rxportconf.ring_size = 1024;
3846                 if (dev->data->nb_tx_queues == 1)
3847                         dev_info->default_txportconf.ring_size = 1024;
3848                 else
3849                         dev_info->default_txportconf.ring_size = 512;
3850
3851         } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3852                 /* For XXV710 */
3853                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3854                 dev_info->default_rxportconf.nb_queues = 1;
3855                 dev_info->default_txportconf.nb_queues = 1;
3856                 dev_info->default_rxportconf.ring_size = 256;
3857                 dev_info->default_txportconf.ring_size = 256;
3858         } else {
3859                 /* For X710 */
3860                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3861                 dev_info->default_rxportconf.nb_queues = 1;
3862                 dev_info->default_txportconf.nb_queues = 1;
3863                 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3864                         dev_info->default_rxportconf.ring_size = 512;
3865                         dev_info->default_txportconf.ring_size = 256;
3866                 } else {
3867                         dev_info->default_rxportconf.ring_size = 256;
3868                         dev_info->default_txportconf.ring_size = 256;
3869                 }
3870         }
3871         dev_info->default_rxportconf.burst_size = 32;
3872         dev_info->default_txportconf.burst_size = 32;
3873
3874         return 0;
3875 }
3876
3877 static int
3878 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3879 {
3880         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3881         struct i40e_vsi *vsi = pf->main_vsi;
3882         PMD_INIT_FUNC_TRACE();
3883
3884         if (on)
3885                 return i40e_vsi_add_vlan(vsi, vlan_id);
3886         else
3887                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3888 }
3889
3890 static int
3891 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3892                                 enum rte_vlan_type vlan_type,
3893                                 uint16_t tpid, int qinq)
3894 {
3895         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3896         uint64_t reg_r = 0;
3897         uint64_t reg_w = 0;
3898         uint16_t reg_id = 3;
3899         int ret;
3900
3901         if (qinq) {
3902                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3903                         reg_id = 2;
3904         }
3905
3906         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3907                                           &reg_r, NULL);
3908         if (ret != I40E_SUCCESS) {
3909                 PMD_DRV_LOG(ERR,
3910                            "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3911                            reg_id);
3912                 return -EIO;
3913         }
3914         PMD_DRV_LOG(DEBUG,
3915                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3916                     reg_id, reg_r);
3917
3918         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3919         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3920         if (reg_r == reg_w) {
3921                 PMD_DRV_LOG(DEBUG, "No need to write");
3922                 return 0;
3923         }
3924
3925         ret = i40e_aq_debug_write_global_register(hw,
3926                                            I40E_GL_SWT_L2TAGCTRL(reg_id),
3927                                            reg_w, NULL);
3928         if (ret != I40E_SUCCESS) {
3929                 PMD_DRV_LOG(ERR,
3930                             "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3931                             reg_id);
3932                 return -EIO;
3933         }
3934         PMD_DRV_LOG(DEBUG,
3935                     "Global register 0x%08x updated with value 0x%08x",
3936                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3937
3938         return 0;
3939 }
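
/*
 * Example of the read-modify-write above (register layout per the mask and
 * shift macros): setting the outer TPID to 0x88A8 with QinQ enabled selects
 * reg_id 2; the ethertype field of L2TAGCTRL[2] is cleared with
 * ~I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK, 0x88A8 is shifted into place by
 * I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT, and all other bits are preserved.
 */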
3940
3941 static int
3942 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3943                    enum rte_vlan_type vlan_type,
3944                    uint16_t tpid)
3945 {
3946         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3947         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3948         int qinq = dev->data->dev_conf.rxmode.offloads &
3949                    DEV_RX_OFFLOAD_VLAN_EXTEND;
3950         int ret = 0;
3951
3952         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3953              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3954             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3955                 PMD_DRV_LOG(ERR,
3956                             "Unsupported vlan type.");
3957                 return -EINVAL;
3958         }
3959
3960         if (pf->support_multi_driver) {
3961                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3962                 return -ENOTSUP;
3963         }
3964
3965         /* The ability to handle 802.1ad frames was added in NVM API 1.7 */
3966         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3967                 if (qinq) {
3968                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3969                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3970                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3971                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3972                 } else {
3973                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3974                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3975                 }
3976                 ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3977                 if (ret != I40E_SUCCESS) {
3978                         PMD_DRV_LOG(ERR,
3979                                     "Set switch config failed aq_err: %d",
3980                                     hw->aq.asq_last_status);
3981                         ret = -EIO;
3982                 }
3983         } else
3984                 /* If NVM API < 1.7, keep the register setting */
3985                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3986                                                       tpid, qinq);
3987
3988         return ret;
3989 }
3990
3991 /* Configure outer vlan stripping on or off in QinQ mode */
3992 static int
3993 i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
3994 {
3995         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3996         int ret = I40E_SUCCESS;
3997         uint32_t reg;
3998
3999         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
4000                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
4001                 return -EINVAL;
4002         }
4003
4004         /* Configure for outer VLAN RX stripping */
4005         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
4006
4007         if (on)
4008                 reg |= I40E_VSI_TSR_QINQ_STRIP;
4009         else
4010                 reg &= ~I40E_VSI_TSR_QINQ_STRIP;
4011
4012         ret = i40e_aq_debug_write_register(hw,
4013                                                    I40E_VSI_TSR(vsi->vsi_id),
4014                                                    reg, NULL);
4015         if (ret < 0) {
4016                 PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
4017                                     vsi->vsi_id);
4018                 return I40E_ERR_CONFIG;
4019         }
4020
4021         return ret;
4022 }
4023
4024 static int
4025 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4026 {
4027         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4028         struct i40e_vsi *vsi = pf->main_vsi;
4029         struct rte_eth_rxmode *rxmode;
4030
4031         rxmode = &dev->data->dev_conf.rxmode;
4032         if (mask & ETH_VLAN_FILTER_MASK) {
4033                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4034                         i40e_vsi_config_vlan_filter(vsi, TRUE);
4035                 else
4036                         i40e_vsi_config_vlan_filter(vsi, FALSE);
4037         }
4038
4039         if (mask & ETH_VLAN_STRIP_MASK) {
4040                 /* Enable or disable VLAN stripping */
4041                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4042                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
4043                 else
4044                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
4045         }
4046
4047         if (mask & ETH_VLAN_EXTEND_MASK) {
4048                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
4049                         i40e_vsi_config_double_vlan(vsi, TRUE);
4050                         /* Set global registers with default ethertype. */
4051                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
4052                                            RTE_ETHER_TYPE_VLAN);
4053                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
4054                                            RTE_ETHER_TYPE_VLAN);
4055                 } else {
4056                         i40e_vsi_config_double_vlan(vsi, FALSE);
4057                 }
4058         }
4059
4060         if (mask & ETH_QINQ_STRIP_MASK) {
4061                 /* Enable or disable outer VLAN stripping */
4062                 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
4063                         i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
4064                 else
4065                         i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
4066         }
4067
4068         return 0;
4069 }
4070
4071 static void
4072 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
4073                           __rte_unused uint16_t queue,
4074                           __rte_unused int on)
4075 {
4076         PMD_INIT_FUNC_TRACE();
4077 }
4078
4079 static int
4080 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4081 {
4082         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4083         struct i40e_vsi *vsi = pf->main_vsi;
4084         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
4085         struct i40e_vsi_vlan_pvid_info info;
4086
4087         memset(&info, 0, sizeof(info));
4088         info.on = on;
4089         if (info.on)
4090                 info.config.pvid = pvid;
4091         else {
4092                 info.config.reject.tagged =
4093                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
4094                 info.config.reject.untagged =
4095                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
4096         }
4097
4098         return i40e_vsi_vlan_pvid_set(vsi, &info);
4099 }
4100
4101 static int
4102 i40e_dev_led_on(struct rte_eth_dev *dev)
4103 {
4104         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4105         uint32_t mode = i40e_led_get(hw);
4106
4107         if (mode == 0)
4108                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
4109
4110         return 0;
4111 }
4112
4113 static int
4114 i40e_dev_led_off(struct rte_eth_dev *dev)
4115 {
4116         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4117         uint32_t mode = i40e_led_get(hw);
4118
4119         if (mode != 0)
4120                 i40e_led_set(hw, 0, false);
4121
4122         return 0;
4123 }
4124
4125 static int
4126 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4127 {
4128         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4129         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4130
4131         fc_conf->pause_time = pf->fc_conf.pause_time;
4132
4133         /* Read back from the registers, in case another port modified them */
4134         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
4135                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
4136         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
4137                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
4138
4139         fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
4140         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
4141
4142         /* Return the current mode according to the actual setting */
4143         switch (hw->fc.current_mode) {
4144         case I40E_FC_FULL:
4145                 fc_conf->mode = RTE_FC_FULL;
4146                 break;
4147         case I40E_FC_TX_PAUSE:
4148                 fc_conf->mode = RTE_FC_TX_PAUSE;
4149                 break;
4150         case I40E_FC_RX_PAUSE:
4151                 fc_conf->mode = RTE_FC_RX_PAUSE;
4152                 break;
4153         case I40E_FC_NONE:
4154         default:
4155                 fc_conf->mode = RTE_FC_NONE;
4156         }
4157
4158         return 0;
4159 }
4160
4161 static int
4162 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4163 {
4164         uint32_t mflcn_reg, fctrl_reg, reg;
4165         uint32_t max_high_water;
4166         uint8_t i, aq_failure;
4167         int err;
4168         struct i40e_hw *hw;
4169         struct i40e_pf *pf;
4170         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
4171                 [RTE_FC_NONE] = I40E_FC_NONE,
4172                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
4173                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
4174                 [RTE_FC_FULL] = I40E_FC_FULL
4175         };
4176
4177         /* The high_water field in rte_eth_fc_conf is expressed in kilobytes */
4178
4179         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4180         if ((fc_conf->high_water > max_high_water) ||
4181                         (fc_conf->high_water < fc_conf->low_water)) {
4182                 PMD_INIT_LOG(ERR,
4183                         "Invalid high/low water setup value in KB, High_water must be <= %d.",
4184                         max_high_water);
4185                 return -EINVAL;
4186         }
4187
4188         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4189         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4190         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4191
4192         pf->fc_conf.pause_time = fc_conf->pause_time;
4193         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4194         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4195
4196         PMD_INIT_FUNC_TRACE();
4197
4198         /* All the link flow control related enable/disable register
4199          * configuration is handled by the F/W
4200          */
4201         err = i40e_set_fc(hw, &aq_failure, true);
4202         if (err < 0)
4203                 return -ENOSYS;
4204
4205         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4206                 /* Configure flow control refresh threshold,
4207                  * the value for stat_tx_pause_refresh_timer[8]
4208                  * is used for global pause operation.
4209                  */
4210
4211                 I40E_WRITE_REG(hw,
4212                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4213                                pf->fc_conf.pause_time);
4214
4215                 /* configure the timer value included in transmitted pause
4216                  * frame,
4217                  * the value for stat_tx_pause_quanta[8] is used for global
4218                  * pause operation
4219                  */
4220                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4221                                pf->fc_conf.pause_time);
4222
4223                 fctrl_reg = I40E_READ_REG(hw,
4224                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4225
4226                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4227                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4228                 else
4229                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4230
4231                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4232                                fctrl_reg);
4233         } else {
4234                 /* Configure pause time (2 TCs per register) */
4235                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4236                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4237                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4238
4239                 /* Configure flow control refresh threshold value */
4240                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4241                                pf->fc_conf.pause_time / 2);
4242
4243                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4244
4245                 /* Set or clear the MFLCN.PMCF and MFLCN.DPF bits
4246                  * depending on the configuration
4247                  */
4248                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
4249                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4250                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4251                 } else {
4252                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4253                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4254                 }
4255
4256                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4257         }
4258
4259         if (!pf->support_multi_driver) {
4260                 /* Configure watermarks based on both packets and bytes */
4261                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4262                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4263                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4264                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4265                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4266                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4267                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4268                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4269                                   << I40E_KILOSHIFT);
4270                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4271                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4272                                    << I40E_KILOSHIFT);
4273         } else {
4274                 PMD_DRV_LOG(ERR,
4275                             "Watermark configuration is not supported.");
4276         }
4277
4278         I40E_WRITE_FLUSH(hw);
4279
4280         return 0;
4281 }
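
/*
 * Worked example for the watermark writes above, e.g. with a high_water of
 * 968 KB (the driver default): it becomes 968 << I40E_KILOSHIFT = 991232
 * bytes in I40E_GLRPB_GHW, and 991232 / I40E_PACKET_AVERAGE_SIZE = 7744
 * packets in I40E_GLRPB_PHW, since the packet-based watermark assumes
 * 128-byte packets.
 */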
4282
4283 static int
4284 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
4285                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
4286 {
4287         PMD_INIT_FUNC_TRACE();
4288
4289         return -ENOSYS;
4290 }
4291
4292 /* Add a MAC address, and update filters */
4293 static int
4294 i40e_macaddr_add(struct rte_eth_dev *dev,
4295                  struct rte_ether_addr *mac_addr,
4296                  __rte_unused uint32_t index,
4297                  uint32_t pool)
4298 {
4299         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4300         struct i40e_mac_filter_info mac_filter;
4301         struct i40e_vsi *vsi;
4302         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4303         int ret;
4304
4305         /* If VMDQ is not enabled or not configured, return */
4306         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4307                           !pf->nb_cfg_vmdq_vsi)) {
4308                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4309                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4310                         pool);
4311                 return -ENOTSUP;
4312         }
4313
4314         if (pool > pf->nb_cfg_vmdq_vsi) {
4315                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4316                                 pool, pf->nb_cfg_vmdq_vsi);
4317                 return -EINVAL;
4318         }
4319
4320         rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4321         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4322                 mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
4323         else
4324                 mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
4325
4326         if (pool == 0)
4327                 vsi = pf->main_vsi;
4328         else
4329                 vsi = pf->vmdq[pool - 1].vsi;
4330
4331         ret = i40e_vsi_add_mac(vsi, &mac_filter);
4332         if (ret != I40E_SUCCESS) {
4333                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4334                 return -ENODEV;
4335         }
4336         return 0;
4337 }
4338
4339 /* Remove a MAC address, and update filters */
4340 static void
4341 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4342 {
4343         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4344         struct i40e_vsi *vsi;
4345         struct rte_eth_dev_data *data = dev->data;
4346         struct rte_ether_addr *macaddr;
4347         int ret;
4348         uint32_t i;
4349         uint64_t pool_sel;
4350
4351         macaddr = &(data->mac_addrs[index]);
4352
4353         pool_sel = dev->data->mac_pool_sel[index];
4354
4355         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4356                 if (pool_sel & (1ULL << i)) {
4357                         if (i == 0)
4358                                 vsi = pf->main_vsi;
4359                         else {
4360                                 /* No VMDQ pool enabled or configured */
4361                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
4362                                         (i > pf->nb_cfg_vmdq_vsi)) {
4363                                         PMD_DRV_LOG(ERR,
4364                                                 "No VMDQ pool enabled/configured");
4365                                         return;
4366                                 }
4367                                 vsi = pf->vmdq[i - 1].vsi;
4368                         }
4369                         ret = i40e_vsi_delete_mac(vsi, macaddr);
4370
4371                         if (ret) {
4372                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4373                                 return;
4374                         }
4375                 }
4376         }
4377 }
4378
4379 static int
4380 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4381 {
4382         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4383         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4384         uint32_t reg;
4385         int ret;
4386
4387         if (!lut)
4388                 return -EINVAL;
4389
4390         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4391                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4392                                           vsi->type != I40E_VSI_SRIOV,
4393                                           lut, lut_size);
4394                 if (ret) {
4395                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4396                         return ret;
4397                 }
4398         } else {
4399                 uint32_t *lut_dw = (uint32_t *)lut;
4400                 uint16_t i, lut_size_dw = lut_size / 4;
4401
4402                 if (vsi->type == I40E_VSI_SRIOV) {
4403                         for (i = 0; i < lut_size_dw; i++) {
4404                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4405                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4406                         }
4407                 } else {
4408                         for (i = 0; i < lut_size_dw; i++)
4409                                 lut_dw[i] = I40E_READ_REG(hw,
4410                                                           I40E_PFQF_HLUT(i));
4411                 }
4412         }
4413
4414         return 0;
4415 }
4416
4417 int
4418 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4419 {
4420         struct i40e_pf *pf;
4421         struct i40e_hw *hw;
4422
4423         if (!vsi || !lut)
4424                 return -EINVAL;
4425
4426         pf = I40E_VSI_TO_PF(vsi);
4427         hw = I40E_VSI_TO_HW(vsi);
4428
4429         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4430                 enum i40e_status_code status;
4431
4432                 status = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4433                                              vsi->type != I40E_VSI_SRIOV,
4434                                              lut, lut_size);
4435                 if (status) {
4436                         PMD_DRV_LOG(ERR,
4437                                     "Failed to update RSS lookup table, error status: %d",
4438                                     status);
4439                         return -EIO;
4440                 }
4441         } else {
4442                 uint32_t *lut_dw = (uint32_t *)lut;
4443                 uint16_t i, lut_size_dw = lut_size / 4;
4444
4445                 if (vsi->type == I40E_VSI_SRIOV) {
4446                         for (i = 0; i < lut_size_dw; i++)
4447                                 I40E_WRITE_REG(
4448                                         hw,
4449                                         I40E_VFQF_HLUT1(i, vsi->user_param),
4450                                         lut_dw[i]);
4451                 } else {
4452                         for (i = 0; i < lut_size_dw; i++)
4453                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4454                                                lut_dw[i]);
4455                 }
4456                 I40E_WRITE_FLUSH(hw);
4457         }
4458
4459         return 0;
4460 }
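
/*
 * The LUT is exchanged with the hardware as 32-bit registers, so the byte
 * array is reinterpreted as dwords: a 512-byte PF LUT becomes 128 dwords,
 * where lut_dw[0] carries lut[0]..lut[3] (on a little-endian host, lut[0]
 * lands in the least significant byte of the register).
 */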
4461
4462 static int
4463 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4464                          struct rte_eth_rss_reta_entry64 *reta_conf,
4465                          uint16_t reta_size)
4466 {
4467         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4468         uint16_t i, lut_size = pf->hash_lut_size;
4469         uint16_t idx, shift;
4470         uint8_t *lut;
4471         int ret;
4472
4473         if (reta_size != lut_size ||
4474                 reta_size > ETH_RSS_RETA_SIZE_512) {
4475                 PMD_DRV_LOG(ERR,
4476                         "Configured hash lookup table size (%d) doesn't match what the hardware can support (%d)",
4477                         reta_size, lut_size);
4478                 return -EINVAL;
4479         }
4480
4481         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4482         if (!lut) {
4483                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4484                 return -ENOMEM;
4485         }
4486         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4487         if (ret)
4488                 goto out;
4489         for (i = 0; i < reta_size; i++) {
4490                 idx = i / RTE_RETA_GROUP_SIZE;
4491                 shift = i % RTE_RETA_GROUP_SIZE;
4492                 if (reta_conf[idx].mask & (1ULL << shift))
4493                         lut[i] = reta_conf[idx].reta[shift];
4494         }
4495         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4496
4497         pf->adapter->rss_reta_updated = 1;
4498
4499 out:
4500         rte_free(lut);
4501
4502         return ret;
4503 }
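
/*
 * Worked example for the index math above: with RTE_RETA_GROUP_SIZE being 64,
 * RETA entry i = 70 maps to idx = 70 / 64 = 1 and shift = 70 % 64 = 6, so
 * lut[70] is updated from reta_conf[1].reta[6] only when bit 6 of
 * reta_conf[1].mask is set.
 */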
4504
4505 static int
4506 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4507                         struct rte_eth_rss_reta_entry64 *reta_conf,
4508                         uint16_t reta_size)
4509 {
4510         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4511         uint16_t i, lut_size = pf->hash_lut_size;
4512         uint16_t idx, shift;
4513         uint8_t *lut;
4514         int ret;
4515
4516         if (reta_size != lut_size ||
4517                 reta_size > ETH_RSS_RETA_SIZE_512) {
4518                 PMD_DRV_LOG(ERR,
4519                         "Configured hash lookup table size (%d) doesn't match what the hardware can support (%d)",
4520                         reta_size, lut_size);
4521                 return -EINVAL;
4522         }
4523
4524         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4525         if (!lut) {
4526                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4527                 return -ENOMEM;
4528         }
4529
4530         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4531         if (ret)
4532                 goto out;
4533         for (i = 0; i < reta_size; i++) {
4534                 idx = i / RTE_RETA_GROUP_SIZE;
4535                 shift = i % RTE_RETA_GROUP_SIZE;
4536                 if (reta_conf[idx].mask & (1ULL << shift))
4537                         reta_conf[idx].reta[shift] = lut[i];
4538         }
4539
4540 out:
4541         rte_free(lut);
4542
4543         return ret;
4544 }
4545
4546 /**
4547  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4548  * @hw:   pointer to the HW structure
4549  * @mem:  pointer to mem struct to fill out
4550  * @size: size of memory requested
4551  * @alignment: what to align the allocation to
4552  **/
4553 enum i40e_status_code
4554 i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
4555                         struct i40e_dma_mem *mem,
4556                         u64 size,
4557                         u32 alignment)
4558 {
4559         static uint64_t i40e_dma_memzone_id;
4560         const struct rte_memzone *mz = NULL;
4561         char z_name[RTE_MEMZONE_NAMESIZE];
4562
4563         if (!mem)
4564                 return I40E_ERR_PARAM;
4565
4566         snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
4567                 __atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
4568         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4569                         RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4570         if (!mz)
4571                 return I40E_ERR_NO_MEMORY;
4572
4573         mem->size = size;
4574         mem->va = mz->addr;
4575         mem->pa = mz->iova;
4576         mem->zone = (const void *)mz;
4577         PMD_DRV_LOG(DEBUG,
4578                 "memzone %s allocated with physical address: %"PRIu64,
4579                 mz->name, mem->pa);
4580
4581         return I40E_SUCCESS;
4582 }
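
/*
 * Usage sketch with illustrative sizes: the base driver can obtain a 4 KB
 * ring aligned to 4 KB with
 *
 *     struct i40e_dma_mem mem;
 *     if (i40e_allocate_dma_mem_d(hw, &mem, 4096, 4096) == I40E_SUCCESS)
 *             ...use mem.va and mem.pa...
 *
 * which reserves an IOVA-contiguous memzone named "i40e_dma_<n>" bounded
 * within a 2 MB page; i40e_free_dma_mem_d() below releases it again.
 */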
4583
4584 /**
4585  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4586  * @hw:   pointer to the HW structure
4587  * @mem:  ptr to mem struct to free
4588  **/
4589 enum i40e_status_code
4590 i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
4591                     struct i40e_dma_mem *mem)
4592 {
4593         if (!mem)
4594                 return I40E_ERR_PARAM;
4595
4596         PMD_DRV_LOG(DEBUG,
4597                 "memzone %s to be freed with physical address: %"PRIu64,
4598                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4599         rte_memzone_free((const struct rte_memzone *)mem->zone);
4600         mem->zone = NULL;
4601         mem->va = NULL;
4602         mem->pa = (u64)0;
4603
4604         return I40E_SUCCESS;
4605 }
4606
4607 /**
4608  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4609  * @hw:   pointer to the HW structure
4610  * @mem:  pointer to mem struct to fill out
4611  * @size: size of memory requested
4612  **/
4613 enum i40e_status_code
4614 i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
4615                          struct i40e_virt_mem *mem,
4616                          u32 size)
4617 {
4618         if (!mem)
4619                 return I40E_ERR_PARAM;
4620
4621         mem->size = size;
4622         mem->va = rte_zmalloc("i40e", size, 0);
4623
4624         if (mem->va)
4625                 return I40E_SUCCESS;
4626         else
4627                 return I40E_ERR_NO_MEMORY;
4628 }
4629
4630 /**
4631  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4632  * @hw:   pointer to the HW structure
4633  * @mem:  pointer to mem struct to free
4634  **/
4635 enum i40e_status_code
4636 i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
4637                      struct i40e_virt_mem *mem)
4638 {
4639         if (!mem)
4640                 return I40E_ERR_PARAM;
4641
4642         rte_free(mem->va);
4643         mem->va = NULL;
4644
4645         return I40E_SUCCESS;
4646 }
4647
4648 void
4649 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4650 {
4651         rte_spinlock_init(&sp->spinlock);
4652 }
4653
4654 void
4655 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4656 {
4657         rte_spinlock_lock(&sp->spinlock);
4658 }
4659
4660 void
4661 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4662 {
4663         rte_spinlock_unlock(&sp->spinlock);
4664 }
4665
4666 void
4667 i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
4668 {
4669         return;
4670 }
4671
4672 /**
4673  * Get the hardware capabilities, which will be parsed
4674  * and saved into struct i40e_hw.
4675  */
4676 static int
4677 i40e_get_cap(struct i40e_hw *hw)
4678 {
4679         struct i40e_aqc_list_capabilities_element_resp *buf;
4680         uint16_t len, size = 0;
4681         int ret;
4682
4683         /* Calculate a buffer size large enough to temporarily hold the response data */
4684         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4685                                                 I40E_MAX_CAP_ELE_NUM;
4686         buf = rte_zmalloc("i40e", len, 0);
4687         if (!buf) {
4688                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4689                 return I40E_ERR_NO_MEMORY;
4690         }
4691
4692         /* Get and parse the capabilities, then save them to hw */
4693         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4694                         i40e_aqc_opc_list_func_capabilities, NULL);
4695         if (ret != I40E_SUCCESS)
4696                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4697
4698         /* Free the temporary buffer after being used */
4699         /* Free the temporary buffer after use */
4700
4701         return ret;
4702 }
4703
4704 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4705
4706 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4707                 const char *value,
4708                 void *opaque)
4709 {
4710         struct i40e_pf *pf;
4711         unsigned long num;
4712         char *end;
4713
4714         pf = (struct i40e_pf *)opaque;
4715         RTE_SET_USED(key);
4716
4717         errno = 0;
4718         num = strtoul(value, &end, 0);
4719         if (errno != 0 || end == value || *end != 0) {
4720                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s, keeping "
4721                             "the current value = %hu", value, pf->vf_nb_qp_max);
4722                 return -(EINVAL);
4723         }
4724
4725         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4726                 pf->vf_nb_qp_max = (uint16_t)num;
4727         else
4728                 /* Return 0 so that a later valid instance of this argument still works */
4729                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu, it must be "
4730                             "a power of 2 and no greater than 16! Keeping "
4731                             "the current value = %hu", num, pf->vf_nb_qp_max);
4732
4733         return 0;
4734 }
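
/*
 * Example with hypothetical devargs: "queue-num-per-vf=8" parses to 8, which
 * is a power of two and <= I40E_MAX_QP_NUM_PER_VF, so vf_nb_qp_max becomes 8;
 * "queue-num-per-vf=6" is rejected with a warning and the previously set
 * default of 4 is kept (the handler still returns 0 in that case).
 */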
4735
4736 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4737 {
4738         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4739         struct rte_kvargs *kvlist;
4740         int kvargs_count;
4741
4742         /* Set the default queue number per VF to 4 */
4743         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4744
4745         if (dev->device->devargs == NULL)
4746                 return 0;
4747
4748         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4749         if (kvlist == NULL)
4750                 return -(EINVAL);
4751
4752         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4753         if (!kvargs_count) {
4754                 rte_kvargs_free(kvlist);
4755                 return 0;
4756         }
4757
4758         if (kvargs_count > 1)
4759                 PMD_DRV_LOG(WARNING, "Argument \"%s\" occurs more than once; "
4760                             "only the first invalid or the last valid one is used!",
4761                             ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4762
4763         rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4764                            i40e_pf_parse_vf_queue_number_handler, pf);
4765
4766         rte_kvargs_free(kvlist);
4767
4768         return 0;
4769 }
4770
4771 static int
4772 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4773 {
4774         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4775         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4776         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4777         uint16_t qp_count = 0, vsi_count = 0;
4778
4779         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4780                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4781                 return -EINVAL;
4782         }
4783
4784         i40e_pf_config_vf_rxq_number(dev);
4785
4786         /* Add the parameter init for LFC */
4787         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4788         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4789         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4790
4791         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4792         pf->max_num_vsi = hw->func_caps.num_vsis;
4793         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4794         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4795
4796         /* FDir queue/VSI allocation */
4797         pf->fdir_qp_offset = 0;
4798         if (hw->func_caps.fd) {
4799                 pf->flags |= I40E_FLAG_FDIR;
4800                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4801         } else {
4802                 pf->fdir_nb_qps = 0;
4803         }
4804         qp_count += pf->fdir_nb_qps;
4805         vsi_count += 1;
4806
4807         /* LAN queue/VSI allocation */
4808         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4809         if (!hw->func_caps.rss) {
4810                 pf->lan_nb_qps = 1;
4811         } else {
4812                 pf->flags |= I40E_FLAG_RSS;
4813                 if (hw->mac.type == I40E_MAC_X722)
4814                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4815                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4816         }
4817         qp_count += pf->lan_nb_qps;
4818         vsi_count += 1;
4819
4820         /* VF queue/VSI allocation */
4821         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4822         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4823                 pf->flags |= I40E_FLAG_SRIOV;
4824                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4825                 pf->vf_num = pci_dev->max_vfs;
4826                 PMD_DRV_LOG(DEBUG,
4827                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4828                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4829         } else {
4830                 pf->vf_nb_qps = 0;
4831                 pf->vf_num = 0;
4832         }
4833         qp_count += pf->vf_nb_qps * pf->vf_num;
4834         vsi_count += pf->vf_num;
4835
4836         /* VMDq queue/VSI allocation */
4837         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4838         pf->vmdq_nb_qps = 0;
4839         pf->max_nb_vmdq_vsi = 0;
4840         if (hw->func_caps.vmdq) {
4841                 if (qp_count < hw->func_caps.num_tx_qp &&
4842                         vsi_count < hw->func_caps.num_vsis) {
4843                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4844                                 qp_count) / pf->vmdq_nb_qp_max;
4845
4846                         /* Limit the maximum number of VMDq vsi to the maximum
4847                          * ethdev can support
4848                          */
4849                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4850                                 hw->func_caps.num_vsis - vsi_count);
4851                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4852                                 ETH_64_POOLS);
4853                         if (pf->max_nb_vmdq_vsi) {
4854                                 pf->flags |= I40E_FLAG_VMDQ;
4855                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4856                                 PMD_DRV_LOG(DEBUG,
4857                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4858                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4859                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4860                         } else {
4861                                 PMD_DRV_LOG(INFO,
4862                                         "Not enough queues left for VMDq");
4863                         }
4864                 } else {
4865                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4866                 }
4867         }
4868         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4869         vsi_count += pf->max_nb_vmdq_vsi;
4870
4871         if (hw->func_caps.dcb)
4872                 pf->flags |= I40E_FLAG_DCB;
4873
4874         if (qp_count > hw->func_caps.num_tx_qp) {
4875                 PMD_DRV_LOG(ERR,
4876                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4877                         qp_count, hw->func_caps.num_tx_qp);
4878                 return -EINVAL;
4879         }
4880         if (vsi_count > hw->func_caps.num_vsis) {
4881                 PMD_DRV_LOG(ERR,
4882                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4883                         vsi_count, hw->func_caps.num_vsis);
4884                 return -EINVAL;
4885         }
4886
4887         return 0;
4888 }
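
/*
 * Illustrative layout (actual numbers depend on HW capabilities and config):
 * with 1 FDIR queue, 64 LAN queues and 4 VFs at 4 queues each, the offsets
 * come out as fdir_qp_offset = 0, lan_qp_offset = 1, vf_qp_offset = 65 and
 * vmdq_qp_offset = 81, giving qp_count = 81 plus any VMDq queues and
 * vsi_count = 6 (FDIR + LAN + 4 VF VSIs) plus any VMDq VSIs.
 */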
4889
4890 static int
4891 i40e_pf_get_switch_config(struct i40e_pf *pf)
4892 {
4893         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4894         struct i40e_aqc_get_switch_config_resp *switch_config;
4895         struct i40e_aqc_switch_config_element_resp *element;
4896         uint16_t start_seid = 0, num_reported;
4897         int ret;
4898
4899         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4900                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4901         if (!switch_config) {
4902                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4903                 return -ENOMEM;
4904         }
4905
4906         /* Get the switch configurations */
4907         ret = i40e_aq_get_switch_config(hw, switch_config,
4908                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4909         if (ret != I40E_SUCCESS) {
4910                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4911                 goto fail;
4912         }
4913         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4914         if (num_reported != 1) { /* The number should be 1 */
4915                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4916                 ret = -EINVAL;
4917                 goto fail;
4918         }
4918
4919         /* Parse the switch configuration elements */
4920         element = &(switch_config->element[0]);
4921         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4922                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4923                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4924         } else
4925                 PMD_DRV_LOG(INFO, "Unknown element type");
4926
4927 fail:
4928         rte_free(switch_config);
4929
4930         return ret;
4931 }
4932
4933 static int
4934 i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
4935                         uint32_t num)
4936 {
4937         struct pool_entry *entry;
4938
4939         if (pool == NULL || num == 0)
4940                 return -EINVAL;
4941
4942         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4943         if (entry == NULL) {
4944                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4945                 return -ENOMEM;
4946         }
4947
4948         /* Initialize the queue heap */
4949         pool->num_free = num;
4950         pool->num_alloc = 0;
4951         pool->base = base;
4952         LIST_INIT(&pool->alloc_list);
4953         LIST_INIT(&pool->free_list);
4954
4955         /* Initialize the element */
4956         entry->base = 0;
4957         entry->len = num;
4958
4959         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4960         return 0;
4961 }
4962
4963 static void
4964 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4965 {
4966         struct pool_entry *entry, *next_entry;
4967
4968         if (pool == NULL)
4969                 return;
4970
4971         for (entry = LIST_FIRST(&pool->alloc_list);
4972                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4973                         entry = next_entry) {
4974                 LIST_REMOVE(entry, next);
4975                 rte_free(entry);
4976         }
4977
4978         for (entry = LIST_FIRST(&pool->free_list);
4979                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4980                         entry = next_entry) {
4981                 LIST_REMOVE(entry, next);
4982                 rte_free(entry);
4983         }
4984
4985         pool->num_free = 0;
4986         pool->num_alloc = 0;
4987         pool->base = 0;
4988         LIST_INIT(&pool->alloc_list);
4989         LIST_INIT(&pool->free_list);
4990 }
4991
4992 static int
4993 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4994                        uint32_t base)
4995 {
4996         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4997         uint32_t pool_offset;
4998         uint16_t len;
4999         int insert;
5000
5001         if (pool == NULL) {
5002                 PMD_DRV_LOG(ERR, "Invalid parameter");
5003                 return -EINVAL;
5004         }
5005
5006         pool_offset = base - pool->base;
5007         /* Lookup in alloc list */
5008         LIST_FOREACH(entry, &pool->alloc_list, next) {
5009                 if (entry->base == pool_offset) {
5010                         valid_entry = entry;
5011                         LIST_REMOVE(entry, next);
5012                         break;
5013                 }
5014         }
5015
5016         /* Not found, return */
5017         if (valid_entry == NULL) {
5018                 PMD_DRV_LOG(ERR, "Failed to find entry");
5019                 return -EINVAL;
5020         }
5021
5022         /**
5023          * Found it; move it to the free list and try to merge.
5024          * The free list is kept sorted by base to make merging easier.
5025          * Find the adjacent prev and next entries.
5026          */
5027         prev = next = NULL;
5028         LIST_FOREACH(entry, &pool->free_list, next) {
5029                 if (entry->base > valid_entry->base) {
5030                         next = entry;
5031                         break;
5032                 }
5033                 prev = entry;
5034         }
5035
5036         insert = 0;
5037         len = valid_entry->len;
5038         /* Try to merge with the next entry */
5039         if (next != NULL) {
5040                 /* The freed block ends exactly where the next entry begins */
5041                 if (valid_entry->base + len == next->base) {
5042                         next->base = valid_entry->base;
5043                         next->len += len;
5044                         rte_free(valid_entry);
5045                         valid_entry = next;
5046                         insert = 1;
5047                 }
5048         }
5049
5050         if (prev != NULL) {
5051                 /* Merge with the previous entry */
5052                 if (prev->base + prev->len == valid_entry->base) {
5053                         prev->len += valid_entry->len;
5054                         /* If it also merged with next, remove that node */
5055                         if (insert == 1) {
5056                                 LIST_REMOVE(valid_entry, next);
5057                                 rte_free(valid_entry);
5058                                 valid_entry = NULL;
5059                         } else {
5060                                 rte_free(valid_entry);
5061                                 valid_entry = NULL;
5062                                 insert = 1;
5063                         }
5064                 }
5065         }
5066
5067         /* No adjacent entry to merge with, so insert */
5068         if (insert == 0) {
5069                 if (prev != NULL)
5070                         LIST_INSERT_AFTER(prev, valid_entry, next);
5071                 else if (next != NULL)
5072                         LIST_INSERT_BEFORE(next, valid_entry, next);
5073                 else /* It's empty list, insert to head */
5074                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5075         }
5076
5077         pool->num_free += len;
5078         pool->num_alloc -= len;
5079
5080         return 0;
5081 }
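
/*
 * Worked example for the merge logic above (hypothetical pool state): with
 * free entries [base 0, len 16] and [base 48, len 16], freeing a block at
 * base 16 with len 32 first merges into the next entry (making [16, 48])
 * and then into the previous one, leaving a single free entry [0, 64] that
 * covers all 64 queues.
 */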
5082
5083 static int
5084 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5085                        uint16_t num)
5086 {
5087         struct pool_entry *entry, *valid_entry;
5088
5089         if (pool == NULL || num == 0) {
5090                 PMD_DRV_LOG(ERR, "Invalid parameter");
5091                 return -EINVAL;
5092         }
5093
5094         if (pool->num_free < num) {
5095                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
5096                             num, pool->num_free);
5097                 return -ENOMEM;
5098         }
5099
5100         valid_entry = NULL;
5101         /* Look up the free list and find the best-fit entry */
5102         LIST_FOREACH(entry, &pool->free_list, next) {
5103                 if (entry->len >= num) {
5104                         /* An exact fit is the best; stop searching */
5105                         if (entry->len == num) {
5106                                 valid_entry = entry;
5107                                 break;
5108                         }
5109                         if (valid_entry == NULL || valid_entry->len > entry->len)
5110                                 valid_entry = entry;
5111                 }
5112         }
5113
5114         /* No entry found that satisfies the request, return */
5115         if (valid_entry == NULL) {
5116                 PMD_DRV_LOG(ERR, "No valid entry found");
5117                 return -ENOMEM;
5118         }
5119         /**
5120          * The entry has exactly the requested number of queues;
5121          * remove it from the free_list.
5122          */
5123         if (valid_entry->len == num) {
5124                 LIST_REMOVE(valid_entry, next);
5125         } else {
5126                 /**
5127                  * The entry has more queues than requested; create a new
5128                  * entry for the alloc_list and shrink the base and length
5129                  * of the remaining free_list entry accordingly.
5130                  */
5131                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5132                 if (entry == NULL) {
5133                         PMD_DRV_LOG(ERR,
5134                                 "Failed to allocate memory for resource pool");
5135                         return -ENOMEM;
5136                 }
5137                 entry->base = valid_entry->base;
5138                 entry->len = num;
5139                 valid_entry->base += num;
5140                 valid_entry->len -= num;
5141                 valid_entry = entry;
5142         }
5143
5144         /* Insert it into the alloc list, which is not kept sorted */
5145         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5146
5147         pool->num_free -= valid_entry->len;
5148         pool->num_alloc += valid_entry->len;
5149
5150         return valid_entry->base + pool->base;
5151 }
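
/*
 * Usage sketch for the pool API above (illustrative, not part of the
 * driver): the allocator returns the absolute base index of a
 * best-fit contiguous range, and i40e_res_pool_free() takes that
 * same absolute base back, merging adjacent free ranges:
 *
 *     int base = i40e_res_pool_alloc(&pf->qp_pool, 4);
 *     if (base < 0)
 *             return base;    // -EINVAL or -ENOMEM
 *     // ... use queue pairs base .. base + 3 ...
 *     i40e_res_pool_free(&pf->qp_pool, base);
 */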
5152
5153 /**
5154  * bitmap_is_subset - Check whether src2 is a subset of src1
5155  **/
5156 static inline int
5157 bitmap_is_subset(uint8_t src1, uint8_t src2)
5158 {
5159         return !((src1 ^ src2) & src2);
5160 }
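
/*
 * Worked example: bitmap_is_subset(0x0F, 0x05) returns 1 because
 * (0x0F ^ 0x05) & 0x05 == 0x0A & 0x05 == 0, i.e. every bit set in
 * src2 is also set in src1; bitmap_is_subset(0x01, 0x03) returns 0
 * because src2 carries bit 1, which src1 lacks.
 */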
5161
5162 static enum i40e_status_code
5163 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5164 {
5165         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5166
5167         /* If DCB is not supported, only default TC is supported */
5168         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5169                 PMD_DRV_LOG(ERR, "DCB is not supported, only TC0 is supported");
5170                 return I40E_NOT_SUPPORTED;
5171         }
5172
5173         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
5174                 PMD_DRV_LOG(ERR,
5175                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
5176                         enabled_tcmap, hw->func_caps.enabled_tcmap);
5177                 return I40E_NOT_SUPPORTED;
5178         }
5179         return I40E_SUCCESS;
5180 }
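
/*
 * For reference: bit i of a TC map enables traffic class i, so a map
 * of 0x05 requests TC0 and TC2. The request passes the check above
 * only if it is a subset of hw->func_caps.enabled_tcmap, and only
 * I40E_DEFAULT_TCMAP (TC0 alone) is accepted without DCB support.
 */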
5181
5182 int
5183 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5184                                 struct i40e_vsi_vlan_pvid_info *info)
5185 {
5186         struct i40e_hw *hw;
5187         struct i40e_vsi_context ctxt;
5188         uint8_t vlan_flags = 0;
5189         int ret;
5190
5191         if (vsi == NULL || info == NULL) {
5192                 PMD_DRV_LOG(ERR, "invalid parameters");
5193                 return I40E_ERR_PARAM;
5194         }
5195
5196         if (info->on) {
5197                 vsi->info.pvid = info->config.pvid;
5198                 /**
5199                  * If PVID insertion is enabled, only tagged packets are
5200                  * allowed to be sent out.
5201                  */
5202                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5203                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5204         } else {
5205                 vsi->info.pvid = 0;
5206                 if (info->config.reject.tagged == 0)
5207                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5208
5209                 if (info->config.reject.untagged == 0)
5210                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5211         }
5212         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5213                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
5214         vsi->info.port_vlan_flags |= vlan_flags;
5215         vsi->info.valid_sections =
5216                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5217         memset(&ctxt, 0, sizeof(ctxt));
5218         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5219         ctxt.seid = vsi->seid;
5220
5221         hw = I40E_VSI_TO_HW(vsi);
5222         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5223         if (ret != I40E_SUCCESS)
5224                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
5225
5226         return ret;
5227 }
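
/*
 * Usage sketch (hypothetical caller, field names as used above):
 * force PVID 100 on a VSI so that all transmitted packets carry
 * VLAN tag 100.
 *
 *     struct i40e_vsi_vlan_pvid_info info;
 *
 *     memset(&info, 0, sizeof(info));
 *     info.on = 1;
 *     info.config.pvid = 100;
 *     if (i40e_vsi_vlan_pvid_set(vsi, &info) != I40E_SUCCESS)
 *             PMD_DRV_LOG(ERR, "failed to set PVID");
 */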
5228
5229 static int
5230 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5231 {
5232         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5233         int i, ret;
5234         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5235
5236         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5237         if (ret != I40E_SUCCESS)
5238                 return ret;
5239
5240         if (!vsi->seid) {
5241                 PMD_DRV_LOG(ERR, "seid not valid");
5242                 return -EINVAL;
5243         }
5244
5245         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5246         tc_bw_data.tc_valid_bits = enabled_tcmap;
5247         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5248                 tc_bw_data.tc_bw_credits[i] =
5249                         (enabled_tcmap & (1 << i)) ? 1 : 0;
5250
5251         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5252         if (ret != I40E_SUCCESS) {
5253                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5254                 return ret;
5255         }
5256
5257         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5258                                         sizeof(vsi->info.qs_handle));
5259         return I40E_SUCCESS;
5260 }
5261
5262 static enum i40e_status_code
5263 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5264                                  struct i40e_aqc_vsi_properties_data *info,
5265                                  uint8_t enabled_tcmap)
5266 {
5267         enum i40e_status_code ret;
5268         int i, total_tc = 0;
5269         uint16_t qpnum_per_tc, bsf, qp_idx;
5270
5271         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5272         if (ret != I40E_SUCCESS)
5273                 return ret;
5274
5275         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5276                 if (enabled_tcmap & (1 << i))
5277                         total_tc++;
5278         if (total_tc == 0)
5279                 total_tc = 1;
5280         vsi->enabled_tc = enabled_tcmap;
5281
5282         /* Number of queues per enabled TC */
5283         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5284         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
5285         bsf = rte_bsf32(qpnum_per_tc);
5286
5287         /* Adjust the queue number to actual queues that can be applied */
5288         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5289                 vsi->nb_qps = qpnum_per_tc * total_tc;
5290
5291         /**
5292          * Configure TC and queue mapping parameters. For each enabled TC,
5293          * allocate qpnum_per_tc queues to its traffic; disabled TCs are
5294          * served by the default queue.
5295          */
5296         qp_idx = 0;
5297         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5298                 if (vsi->enabled_tc & (1 << i)) {
5299                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5300                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5301                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5302                         qp_idx += qpnum_per_tc;
5303                 } else
5304                         info->tc_mapping[i] = 0;
5305         }
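
        /*
         * Worked example of the mapping above: with vsi->nb_qps = 16
         * and TC0/TC1 enabled (total_tc = 2), qpnum_per_tc = 8 and
         * bsf = 3 (2^3 queues per TC), so TC0 maps to queue offset 0
         * and TC1 to offset 8, each encoded as (offset <<
         * I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
         * (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT).
         */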
5306
5307         /* Associate queue number with VSI */
5308         if (vsi->type == I40E_VSI_SRIOV) {
5309                 info->mapping_flags |=
5310                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5311                 for (i = 0; i < vsi->nb_qps; i++)
5312                         info->queue_mapping[i] =
5313                                 rte_cpu_to_le_16(vsi->base_queue + i);
5314         } else {
5315                 info->mapping_flags |=
5316                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5317                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5318         }
5319         info->valid_sections |=
5320                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5321
5322         return I40E_SUCCESS;
5323 }
5324
5325 static int
5326 i40e_veb_release(struct i40e_veb *veb)
5327 {
5328         struct i40e_vsi *vsi;
5329         struct i40e_hw *hw;
5330
5331         if (veb == NULL)
5332                 return -EINVAL;
5333
5334         if (!TAILQ_EMPTY(&veb->head)) {
5335                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5336                 return -EACCES;
5337         }
5338         /* associate_vsi field is NULL for floating VEB */
5339         if (veb->associate_vsi != NULL) {
5340                 vsi = veb->associate_vsi;
5341                 hw = I40E_VSI_TO_HW(vsi);
5342
5343                 vsi->uplink_seid = veb->uplink_seid;
5344                 vsi->veb = NULL;
5345         } else {
5346                 veb->associate_pf->main_vsi->floating_veb = NULL;
5347                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5348         }
5349
5350         i40e_aq_delete_element(hw, veb->seid, NULL);
5351         rte_free(veb);
5352         return I40E_SUCCESS;
5353 }
5354
5355 /* Setup a veb */
5356 static struct i40e_veb *
5357 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5358 {
5359         struct i40e_veb *veb;
5360         int ret;
5361         struct i40e_hw *hw;
5362
5363         if (pf == NULL) {
5364                 PMD_DRV_LOG(ERR,
5365                             "veb setup failed, associated PF shouldn't be NULL");
5366                 return NULL;
5367         }
5368         hw = I40E_PF_TO_HW(pf);
5369
5370         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5371         if (!veb) {
5372                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5373                 goto fail;
5374         }
5375
5376         veb->associate_vsi = vsi;
5377         veb->associate_pf = pf;
5378         TAILQ_INIT(&veb->head);
5379         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5380
5381         /* A floating VEB is created when vsi is NULL */
5382         if (vsi != NULL) {
5383                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5384                                       I40E_DEFAULT_TCMAP, false,
5385                                       &veb->seid, false, NULL);
5386         } else {
5387                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5388                                       true, &veb->seid, false, NULL);
5389         }
5390
5391         if (ret != I40E_SUCCESS) {
5392                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5393                             hw->aq.asq_last_status);
5394                 goto fail;
5395         }
5396         veb->enabled_tc = I40E_DEFAULT_TCMAP;
5397
5398         /* get statistics index */
5399         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5400                                 &veb->stats_idx, NULL, NULL, NULL);
5401         if (ret != I40E_SUCCESS) {
5402                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5403                             hw->aq.asq_last_status);
5404                 goto fail;
5405         }
5406         /* Get VEB bandwidth, to be implemented */
5407         /* The associated vsi is now bound to the VEB, set its uplink to it */
5408         if (vsi)
5409                 vsi->uplink_seid = veb->seid;
5410
5411         return veb;
5412 fail:
5413         rte_free(veb);
5414         return NULL;
5415 }
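
/*
 * Note on the two call shapes above: i40e_veb_setup(pf, vsi) creates
 * a VEB uplinked to the given VSI, while i40e_veb_setup(pf, NULL)
 * creates a floating VEB with no uplink (uplink_seid 0), matching
 * the two i40e_aq_add_veb() invocations.
 */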
5416
5417 int
5418 i40e_vsi_release(struct i40e_vsi *vsi)
5419 {
5420         struct i40e_pf *pf;
5421         struct i40e_hw *hw;
5422         struct i40e_vsi_list *vsi_list;
5423         void *temp;
5424         int ret;
5425         struct i40e_mac_filter *f;
5426         uint16_t user_param;
5427
5428         if (!vsi)
5429                 return I40E_SUCCESS;
5430
5431         if (!vsi->adapter)
5432                 return -EFAULT;
5433
5434         user_param = vsi->user_param;
5435
5436         pf = I40E_VSI_TO_PF(vsi);
5437         hw = I40E_VSI_TO_HW(vsi);
5438
5439         /* VSI has children attached, release the children first */
5440         if (vsi->veb) {
5441                 RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5442                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5443                                 return -1;
5444                 }
5445                 i40e_veb_release(vsi->veb);
5446         }
5447
5448         if (vsi->floating_veb) {
5449                 RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head,
5450                         list, temp) {
5451                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5452                                 return -1;
5453                 }
5454         }
5455
5456         /* Remove all macvlan filters of the VSI */
5457         i40e_vsi_remove_all_macvlan_filter(vsi);
5458         RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5459                 rte_free(f);
5460
5461         if (vsi->type != I40E_VSI_MAIN &&
5462             ((vsi->type != I40E_VSI_SRIOV) ||
5463             !pf->floating_veb_list[user_param])) {
5464                 /* Remove vsi from parent's sibling list */
5465                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5466                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5467                         return I40E_ERR_PARAM;
5468                 }
5469                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5470                                 &vsi->sib_vsi_list, list);
5471
5472                 /* Remove the switch element of the VSI */
5473                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5474                 if (ret != I40E_SUCCESS)
5475                         PMD_DRV_LOG(ERR, "Failed to delete element");
5476         }
5477
5478         if ((vsi->type == I40E_VSI_SRIOV) &&
5479             pf->floating_veb_list[user_param]) {
5480                 /* Remove vsi from parent's sibling list */
5481                 if (vsi->parent_vsi == NULL ||
5482                     vsi->parent_vsi->floating_veb == NULL) {
5483                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5484                         return I40E_ERR_PARAM;
5485                 }
5486                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5487                              &vsi->sib_vsi_list, list);
5488
5489                 /* Remove the switch element of the VSI */
5490                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5491                 if (ret != I40E_SUCCESS)
5492                         PMD_DRV_LOG(ERR, "Failed to delete element");
5493         }
5494
5495         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5496
5497         if (vsi->type != I40E_VSI_SRIOV)
5498                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5499         rte_free(vsi);
5500
5501         return I40E_SUCCESS;
5502 }
5503
5504 static int
5505 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5506 {
5507         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5508         struct i40e_aqc_remove_macvlan_element_data def_filter;
5509         struct i40e_mac_filter_info filter;
5510         int ret;
5511
5512         if (vsi->type != I40E_VSI_MAIN)
5513                 return I40E_ERR_CONFIG;
5514         memset(&def_filter, 0, sizeof(def_filter));
5515         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5516                                         ETH_ADDR_LEN);
5517         def_filter.vlan_tag = 0;
5518         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5519                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5520         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5521         if (ret != I40E_SUCCESS) {
5522                 struct i40e_mac_filter *f;
5523                 struct rte_ether_addr *mac;
5524
5525                 PMD_DRV_LOG(DEBUG,
5526                             "Cannot remove the default macvlan filter");
5527                 /* Add the permanent MAC into the MAC list instead */
5528                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5529                 if (f == NULL) {
5530                         PMD_DRV_LOG(ERR, "failed to allocate memory");
5531                         return I40E_ERR_NO_MEMORY;
5532                 }
5533                 mac = &f->mac_info.mac_addr;
5534                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5535                                 ETH_ADDR_LEN);
5536                 f->mac_info.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5537                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5538                 vsi->mac_num++;
5539
5540                 return ret;
5541         }
5542         rte_memcpy(&filter.mac_addr,
5543                 (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5544         filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5545         return i40e_vsi_add_mac(vsi, &filter);
5546 }
5547
5548 /*
5549  * i40e_vsi_get_bw_config - Query VSI BW Information
5550  * @vsi: the VSI to be queried
5551  *
5552  * Returns 0 on success, negative value on failure
5553  */
5554 static enum i40e_status_code
5555 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5556 {
5557         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5558         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5559         struct i40e_hw *hw = &vsi->adapter->hw;
5560         i40e_status ret;
5561         int i;
5562         uint32_t bw_max;
5563
5564         memset(&bw_config, 0, sizeof(bw_config));
5565         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5566         if (ret != I40E_SUCCESS) {
5567                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5568                             hw->aq.asq_last_status);
5569                 return ret;
5570         }
5571
5572         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5573         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5574                                         &ets_sla_config, NULL);
5575         if (ret != I40E_SUCCESS) {
5576                 PMD_DRV_LOG(ERR,
5577                         "VSI failed to get TC bandwidth configuration %u",
5578                         hw->aq.asq_last_status);
5579                 return ret;
5580         }
5581
5582         /* store and print out BW info */
5583         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5584         vsi->bw_info.bw_max = bw_config.max_bw;
5585         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5586         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5587         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5588                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5589                      I40E_16_BIT_WIDTH);
5590         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5591                 vsi->bw_info.bw_ets_share_credits[i] =
5592                                 ets_sla_config.share_credits[i];
5593                 vsi->bw_info.bw_ets_credits[i] =
5594                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5595                 /* 4 bits per TC, 4th bit is reserved */
5596                 vsi->bw_info.bw_ets_max[i] =
5597                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5598                                   RTE_LEN2MASK(3, uint8_t));
5599                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5600                             vsi->bw_info.bw_ets_share_credits[i]);
5601                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5602                             vsi->bw_info.bw_ets_credits[i]);
5603                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5604                             vsi->bw_info.bw_ets_max[i]);
5605         }
5606
5607         return I40E_SUCCESS;
5608 }
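
/*
 * Worked example of the tc_bw_max unpacking above: the two 16-bit
 * little-endian words combine into one 32-bit value holding eight
 * 4-bit fields, one per TC. With bw_max = 0x00000321, TC0 reads max
 * credits 1, TC1 reads 2 and TC2 reads 3; the 4th bit of each nibble
 * is reserved, hence the 3-bit RTE_LEN2MASK(3, uint8_t) mask.
 */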
5609
5610 /* i40e_enable_pf_lb
5611  * @pf: pointer to the pf structure
5612  *
5613  * allow loopback on pf
5614  */
5615 static inline void
5616 i40e_enable_pf_lb(struct i40e_pf *pf)
5617 {
5618         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5619         struct i40e_vsi_context ctxt;
5620         int ret;
5621
5622         /* Use the FW API if FW >= v5.0 */
5623         if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5624                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5625                 return;
5626         }
5627
5628         memset(&ctxt, 0, sizeof(ctxt));
5629         ctxt.seid = pf->main_vsi_seid;
5630         ctxt.pf_num = hw->pf_id;
5631         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5632         if (ret) {
5633                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5634                             ret, hw->aq.asq_last_status);
5635                 return;
5636         }
5637         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5638         ctxt.info.valid_sections =
5639                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5640         ctxt.info.switch_id |=
5641                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5642
5643         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5644         if (ret)
5645                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5646                             hw->aq.asq_last_status);
5647 }
5648
5649 /* Setup a VSI */
5650 struct i40e_vsi *
5651 i40e_vsi_setup(struct i40e_pf *pf,
5652                enum i40e_vsi_type type,
5653                struct i40e_vsi *uplink_vsi,
5654                uint16_t user_param)
5655 {
5656         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5657         struct i40e_vsi *vsi;
5658         struct i40e_mac_filter_info filter;
5659         int ret;
5660         struct i40e_vsi_context ctxt;
5661         struct rte_ether_addr broadcast =
5662                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5663
5664         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5665             uplink_vsi == NULL) {
5666                 PMD_DRV_LOG(ERR,
5667                         "VSI setup failed, uplink VSI shouldn't be NULL");
5668                 return NULL;
5669         }
5670
5671         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5672                 PMD_DRV_LOG(ERR,
5673                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5674                 return NULL;
5675         }
5676
5677         /* Two situations:
5678          * 1. type is not MAIN and the uplink vsi is not NULL:
5679          * if the uplink vsi has no VEB yet, create one first in its veb field.
5680          * 2. type is SRIOV and the uplink is NULL:
5681          * if the floating VEB is NULL, create one in the floating_veb field.
5682          */
5683
5684         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5685             uplink_vsi->veb == NULL) {
5686                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5687
5688                 if (uplink_vsi->veb == NULL) {
5689                         PMD_DRV_LOG(ERR, "VEB setup failed");
5690                         return NULL;
5691                 }
5692                 /* Set ALLOW_LOOPBACK on the PF when the VEB is created */
5693                 i40e_enable_pf_lb(pf);
5694         }
5695
5696         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5697             pf->main_vsi->floating_veb == NULL) {
5698                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5699
5700                 if (pf->main_vsi->floating_veb == NULL) {
5701                         PMD_DRV_LOG(ERR, "VEB setup failed");
5702                         return NULL;
5703                 }
5704         }
5705
5706         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5707         if (!vsi) {
5708                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5709                 return NULL;
5710         }
5711         TAILQ_INIT(&vsi->mac_list);
5712         vsi->type = type;
5713         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5714         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5715         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5716         vsi->user_param = user_param;
5717         vsi->vlan_anti_spoof_on = 0;
5718         vsi->vlan_filter_on = 0;
5719         /* Allocate queues */
5720         switch (vsi->type) {
5721         case I40E_VSI_MAIN:
5722                 vsi->nb_qps = pf->lan_nb_qps;
5723                 break;
5724         case I40E_VSI_SRIOV:
5725                 vsi->nb_qps = pf->vf_nb_qps;
5726                 break;
5727         case I40E_VSI_VMDQ2:
5728                 vsi->nb_qps = pf->vmdq_nb_qps;
5729                 break;
5730         case I40E_VSI_FDIR:
5731                 vsi->nb_qps = pf->fdir_nb_qps;
5732                 break;
5733         default:
5734                 goto fail_mem;
5735         }
5736         /*
5737          * The filter status descriptor is reported in rx queue 0,
5738          * while the tx queue for fdir filter programming has no
5739          * such constraint and can be any queue.
5740          * To simplify things, the FDIR vsi uses queue pair 0.
5741          * To make sure it gets queue pair 0, queue allocation
5742          * needs to be done before this function is called.
5743          */
5744         if (type != I40E_VSI_FDIR) {
5745                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5746                 if (ret < 0) {
5747                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5748                                     vsi->seid, ret);
5749                         goto fail_mem;
5750                 }
5751                 vsi->base_queue = ret;
5752         } else
5753                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5754
5755         /* VFs have MSIX interrupts in the VF range, don't allocate here */
5756         if (type == I40E_VSI_MAIN) {
5757                 if (pf->support_multi_driver) {
5758                         /* If multi-driver is supported, INT0 must be used
5759                          * instead of allocating from the MSIX pool. The pool
5760                          * starts from INT1, so it is OK to just set msix_intr
5761                          * to 0 and nb_msix to 1 without i40e_res_pool_alloc.
5762                          */
5763                         vsi->msix_intr = 0;
5764                         vsi->nb_msix = 1;
5765                 } else {
5766                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5767                                                   RTE_MIN(vsi->nb_qps,
5768                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5769                         if (ret < 0) {
5770                                 PMD_DRV_LOG(ERR,
5771                                             "VSI MAIN %d get heap failed %d",
5772                                             vsi->seid, ret);
5773                                 goto fail_queue_alloc;
5774                         }
5775                         vsi->msix_intr = ret;
5776                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5777                                                RTE_MAX_RXTX_INTR_VEC_ID);
5778                 }
5779         } else if (type != I40E_VSI_SRIOV) {
5780                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5781                 if (ret < 0) {
5782                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5783                         if (type != I40E_VSI_FDIR)
5784                                 goto fail_queue_alloc;
5785                         vsi->msix_intr = 0;
5786                         vsi->nb_msix = 0;
5787                 } else {
5788                         vsi->msix_intr = ret;
5789                         vsi->nb_msix = 1;
5790                 }
5791         } else {
5792                 vsi->msix_intr = 0;
5793                 vsi->nb_msix = 0;
5794         }
5795
5796         /* Add VSI */
5797         if (type == I40E_VSI_MAIN) {
5798                 /* For the main VSI, no need to add it since it's the default one */
5799                 vsi->uplink_seid = pf->mac_seid;
5800                 vsi->seid = pf->main_vsi_seid;
5801                 /* Bind queues with specific MSIX interrupt */
5802                 /**
5803                  * At least 2 interrupts are needed: one for misc causes,
5804                  * enabled from the OS side, and another for binding queue
5805                  * interrupts from the device side only.
5806                  */
5807
5808                 /* Get default VSI parameters from hardware */
5809                 memset(&ctxt, 0, sizeof(ctxt));
5810                 ctxt.seid = vsi->seid;
5811                 ctxt.pf_num = hw->pf_id;
5812                 ctxt.uplink_seid = vsi->uplink_seid;
5813                 ctxt.vf_num = 0;
5814                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5815                 if (ret != I40E_SUCCESS) {
5816                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5817                         goto fail_msix_alloc;
5818                 }
5819                 rte_memcpy(&vsi->info, &ctxt.info,
5820                         sizeof(struct i40e_aqc_vsi_properties_data));
5821                 vsi->vsi_id = ctxt.vsi_number;
5822                 vsi->info.valid_sections = 0;
5823
5824                 /* Configure TCs, enable TC0 only */
5825                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5826                         I40E_SUCCESS) {
5827                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5828                         goto fail_msix_alloc;
5829                 }
5830
5831                 /* TC, queue mapping */
5832                 memset(&ctxt, 0, sizeof(ctxt));
5833                 vsi->info.valid_sections |=
5834                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5835                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5836                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5837                 rte_memcpy(&ctxt.info, &vsi->info,
5838                         sizeof(struct i40e_aqc_vsi_properties_data));
5839                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5840                                                 I40E_DEFAULT_TCMAP);
5841                 if (ret != I40E_SUCCESS) {
5842                         PMD_DRV_LOG(ERR,
5843                                 "Failed to configure TC queue mapping");
5844                         goto fail_msix_alloc;
5845                 }
5846                 ctxt.seid = vsi->seid;
5847                 ctxt.pf_num = hw->pf_id;
5848                 ctxt.uplink_seid = vsi->uplink_seid;
5849                 ctxt.vf_num = 0;
5850
5851                 /* Update VSI parameters */
5852                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5853                 if (ret != I40E_SUCCESS) {
5854                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5855                         goto fail_msix_alloc;
5856                 }
5857
5858                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5859                                                 sizeof(vsi->info.tc_mapping));
5860                 rte_memcpy(&vsi->info.queue_mapping,
5861                                 &ctxt.info.queue_mapping,
5862                         sizeof(vsi->info.queue_mapping));
5863                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5864                 vsi->info.valid_sections = 0;
5865
5866                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5867                                 ETH_ADDR_LEN);
5868
5869                 /**
5870                  * Updating the default filter settings is necessary to
5871                  * prevent reception of tagged packets.
5872                  * Some old firmware configurations load a default macvlan
5873                  * filter which accepts both tagged and untagged packets.
5874                  * The update replaces it with a normal filter if needed.
5875                  * For NVM 4.2.2 or later, the update is no longer needed:
5876                  * firmware with correct configurations loads the default
5877                  * macvlan filter, which is expected and cannot be removed.
5878                  */
5879                 i40e_update_default_filter_setting(vsi);
5880                 i40e_config_qinq(hw, vsi);
5881         } else if (type == I40E_VSI_SRIOV) {
5882                 memset(&ctxt, 0, sizeof(ctxt));
5883                 /**
5884                  * For other VSIs, the uplink_seid equals the uplink VSI's
5885                  * uplink_seid since they share the same VEB.
5886                  */
5887                 if (uplink_vsi == NULL)
5888                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5889                 else
5890                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5891                 ctxt.pf_num = hw->pf_id;
5892                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5893                 ctxt.uplink_seid = vsi->uplink_seid;
5894                 ctxt.connection_type = 0x1;
5895                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5896
5897                 /* Use the VEB configuration if FW >= v5.0 */
5898                 if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
5899                         /* Configure switch ID */
5900                         ctxt.info.valid_sections |=
5901                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5902                         ctxt.info.switch_id =
5903                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5904                 }
5905
5906                 /* Configure port/vlan */
5907                 ctxt.info.valid_sections |=
5908                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5909                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5910                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5911                                                 hw->func_caps.enabled_tcmap);
5912                 if (ret != I40E_SUCCESS) {
5913                         PMD_DRV_LOG(ERR,
5914                                 "Failed to configure TC queue mapping");
5915                         goto fail_msix_alloc;
5916                 }
5917
5918                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5919                 ctxt.info.valid_sections |=
5920                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5921                 /**
5922                  * Since the VSI is not created yet, only configure the
5923                  * parameters; the VSI is added below.
5924                  */
5925
5926                 i40e_config_qinq(hw, vsi);
5927         } else if (type == I40E_VSI_VMDQ2) {
5928                 memset(&ctxt, 0, sizeof(ctxt));
5929                 /*
5930                  * For other VSIs, the uplink_seid equals the uplink VSI's
5931                  * uplink_seid since they share the same VEB.
5932                  */
5933                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5934                 ctxt.pf_num = hw->pf_id;
5935                 ctxt.vf_num = 0;
5936                 ctxt.uplink_seid = vsi->uplink_seid;
5937                 ctxt.connection_type = 0x1;
5938                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5939
5940                 ctxt.info.valid_sections |=
5941                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5942                 /* user_param carries a flag to enable loopback */
5943                 if (user_param) {
5944                         ctxt.info.switch_id =
5945                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5946                         ctxt.info.switch_id |=
5947                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5948                 }
5949
5950                 /* Configure port/vlan */
5951                 ctxt.info.valid_sections |=
5952                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5953                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5954                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5955                                                 I40E_DEFAULT_TCMAP);
5956                 if (ret != I40E_SUCCESS) {
5957                         PMD_DRV_LOG(ERR,
5958                                 "Failed to configure TC queue mapping");
5959                         goto fail_msix_alloc;
5960                 }
5961                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5962                 ctxt.info.valid_sections |=
5963                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5964         } else if (type == I40E_VSI_FDIR) {
5965                 memset(&ctxt, 0, sizeof(ctxt));
5966                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5967                 ctxt.pf_num = hw->pf_id;
5968                 ctxt.vf_num = 0;
5969                 ctxt.uplink_seid = vsi->uplink_seid;
5970                 ctxt.connection_type = 0x1;     /* regular data port */
5971                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5972                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5973                                                 I40E_DEFAULT_TCMAP);
5974                 if (ret != I40E_SUCCESS) {
5975                         PMD_DRV_LOG(ERR,
5976                                 "Failed to configure TC queue mapping.");
5977                         goto fail_msix_alloc;
5978                 }
5979                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5980                 ctxt.info.valid_sections |=
5981                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5982         } else {
5983                 PMD_DRV_LOG(ERR, "VSI: type not supported yet");
5984                 goto fail_msix_alloc;
5985         }
5986
5987         if (vsi->type != I40E_VSI_MAIN) {
5988                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5989                 if (ret != I40E_SUCCESS) {
5990                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5991                                     hw->aq.asq_last_status);
5992                         goto fail_msix_alloc;
5993                 }
5994                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5995                 vsi->info.valid_sections = 0;
5996                 vsi->seid = ctxt.seid;
5997                 vsi->vsi_id = ctxt.vsi_number;
5998                 vsi->sib_vsi_list.vsi = vsi;
5999                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
6000                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
6001                                           &vsi->sib_vsi_list, list);
6002                 } else {
6003                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
6004                                           &vsi->sib_vsi_list, list);
6005                 }
6006         }
6007
6008         /* MAC/VLAN configuration */
6009         rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
6010         filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
6011
6012         ret = i40e_vsi_add_mac(vsi, &filter);
6013         if (ret != I40E_SUCCESS) {
6014                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
6015                 goto fail_msix_alloc;
6016         }
6017
6018         /* Get VSI BW information */
6019         i40e_vsi_get_bw_config(vsi);
6020         return vsi;
6021 fail_msix_alloc:
6022         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
6023 fail_queue_alloc:
6024         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
6025 fail_mem:
6026         rte_free(vsi);
6027         return NULL;
6028 }
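
/*
 * Usage sketch: a flow director VSI, for example, is created on the
 * main VSI's VEB with user_param 0, as the FDIR setup path in this
 * driver does:
 *
 *     vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
 *     if (vsi == NULL)
 *             return -EIO;    // hypothetical error handling
 */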
6029
6030 /* Configure vlan filter on or off */
6031 int
6032 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
6033 {
6034         int i, num;
6035         struct i40e_mac_filter *f;
6036         void *temp;
6037         struct i40e_mac_filter_info *mac_filter;
6038         enum i40e_mac_filter_type desired_filter;
6039         int ret = I40E_SUCCESS;
6040
6041         if (on) {
6042                 /* Filter to match MAC and VLAN */
6043                 desired_filter = I40E_MACVLAN_PERFECT_MATCH;
6044         } else {
6045                 /* Filter to match only MAC */
6046                 desired_filter = I40E_MAC_PERFECT_MATCH;
6047         }
6048
6049         num = vsi->mac_num;
6050
6051         mac_filter = rte_zmalloc("mac_filter_info_data",
6052                                  num * sizeof(*mac_filter), 0);
6053         if (mac_filter == NULL) {
6054                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6055                 return I40E_ERR_NO_MEMORY;
6056         }
6057
6058         i = 0;
6059
6060         /* Remove all existing MAC filters */
6061         RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
6062                 mac_filter[i] = f->mac_info;
6063                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
6064                 if (ret) {
6065                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6066                                     on ? "enable" : "disable");
6067                         goto DONE;
6068                 }
6069                 i++;
6070         }
6071
6072         /* Re-add them with the desired filter type */
6073         for (i = 0; i < num; i++) {
6074                 mac_filter[i].filter_type = desired_filter;
6075                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
6076                 if (ret) {
6077                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6078                                     on ? "enable" : "disable");
6079                         goto DONE;
6080                 }
6081         }
6082
6083 DONE:
6084         rte_free(mac_filter);
6085         return ret;
6086 }
6087
6088 /* Configure vlan stripping on or off */
6089 int
6090 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6091 {
6092         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6093         struct i40e_vsi_context ctxt;
6094         uint8_t vlan_flags;
6095         int ret = I40E_SUCCESS;
6096
6097         /* Check if it is already on or off */
6098         if (vsi->info.valid_sections &
6099                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6100                 if (on) {
6101                         if ((vsi->info.port_vlan_flags &
6102                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6103                                 return 0; /* already on */
6104                 } else {
6105                         if ((vsi->info.port_vlan_flags &
6106                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6107                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
6108                                 return 0; /* already off */
6109                 }
6110         }
6111
6112         if (on)
6113                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6114         else
6115                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6116         vsi->info.valid_sections =
6117                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6118         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6119         vsi->info.port_vlan_flags |= vlan_flags;
6120         ctxt.seid = vsi->seid;
6121         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6122         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6123         if (ret)
6124                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6125                             on ? "enable" : "disable");
6126
6127         return ret;
6128 }
6129
6130 static int
6131 i40e_dev_init_vlan(struct rte_eth_dev *dev)
6132 {
6133         struct rte_eth_dev_data *data = dev->data;
6134         int ret;
6135         int mask = 0;
6136
6137         /* Apply vlan offload setting */
6138         mask = ETH_VLAN_STRIP_MASK |
6139                ETH_QINQ_STRIP_MASK |
6140                ETH_VLAN_FILTER_MASK |
6141                ETH_VLAN_EXTEND_MASK;
6142         ret = i40e_vlan_offload_set(dev, mask);
6143         if (ret) {
6144                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
6145                 return ret;
6146         }
6147
6148         /* Apply pvid setting */
6149         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6150                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
6151         if (ret)
6152                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
6153
6154         return ret;
6155 }
6156
6157 static int
6158 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6159 {
6160         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6161
6162         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
6163 }
6164
6165 static int
6166 i40e_update_flow_control(struct i40e_hw *hw)
6167 {
6168 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
6169         struct i40e_link_status link_status;
6170         uint32_t rxfc = 0, txfc = 0, reg;
6171         uint8_t an_info;
6172         int ret;
6173
6174         memset(&link_status, 0, sizeof(link_status));
6175         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
6176         if (ret != I40E_SUCCESS) {
6177                 PMD_DRV_LOG(ERR, "Failed to get link status information");
6178                 goto write_reg; /* Disable flow control */
6179         }
6180
6181         an_info = hw->phy.link_info.an_info;
6182         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
6183                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
6184                 ret = I40E_ERR_NOT_READY;
6185                 goto write_reg; /* Disable flow control */
6186         }
6187         /**
6188          * If link auto-negotiation is enabled, flow control needs to
6189          * be configured according to it.
6190          */
6191         switch (an_info & I40E_LINK_PAUSE_RXTX) {
6192         case I40E_LINK_PAUSE_RXTX:
6193                 rxfc = 1;
6194                 txfc = 1;
6195                 hw->fc.current_mode = I40E_FC_FULL;
6196                 break;
6197         case I40E_AQ_LINK_PAUSE_RX:
6198                 rxfc = 1;
6199                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
6200                 break;
6201         case I40E_AQ_LINK_PAUSE_TX:
6202                 txfc = 1;
6203                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
6204                 break;
6205         default:
6206                 hw->fc.current_mode = I40E_FC_NONE;
6207                 break;
6208         }
6209
6210 write_reg:
6211         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6212                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6213         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6214         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6215         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6216         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6217
6218         return ret;
6219 }
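
/*
 * The register writes at write_reg translate the negotiated mode
 * into hardware enables: txfc drives PRTDCB_FCCFG.TFCE (send pause
 * frames) and rxfc drives PRTDCB_MFLCN.RFCE (honor received pause
 * frames); both default to 0, i.e. flow control disabled, which is
 * also the fallback when link information cannot be read.
 */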
6220
6221 /* PF setup */
6222 static int
6223 i40e_pf_setup(struct i40e_pf *pf)
6224 {
6225         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6226         struct i40e_filter_control_settings settings;
6227         struct i40e_vsi *vsi;
6228         int ret;
6229
6230         /* Clear all stats counters */
6231         pf->offset_loaded = FALSE;
6232         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6233         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
6234         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
6235         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6236
6237         ret = i40e_pf_get_switch_config(pf);
6238         if (ret != I40E_SUCCESS) {
6239                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6240                 return ret;
6241         }
6242
6243         ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6244         if (ret)
6245                 PMD_INIT_LOG(WARNING,
6246                         "failed to allocate switch domain for device, err %d", ret);
6247
6248         if (pf->flags & I40E_FLAG_FDIR) {
6249                 /* Allocate queues first, so that FDIR uses queue pair 0 */
6250                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6251                 if (ret != I40E_FDIR_QUEUE_ID) {
6252                         PMD_DRV_LOG(ERR,
6253                                 "queue allocation failed for FDIR: ret = %d",
6254                                 ret);
6255                         pf->flags &= ~I40E_FLAG_FDIR;
6256                 }
6257         }
6258         /* Main VSI setup */
6259         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6260         if (!vsi) {
6261                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6262                 return I40E_ERR_NOT_READY;
6263         }
6264         pf->main_vsi = vsi;
6265
6266         /* Configure filter control */
6267         memset(&settings, 0, sizeof(settings));
6268         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
6269                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6270         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
6271                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6272         else {
6273                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6274                         hw->func_caps.rss_table_size);
6275                 return I40E_ERR_PARAM;
6276         }
6277         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
6278                 hw->func_caps.rss_table_size);
6279         pf->hash_lut_size = hw->func_caps.rss_table_size;
6280
6281         /* Enable ethtype and macvlan filters */
6282         settings.enable_ethtype = TRUE;
6283         settings.enable_macvlan = TRUE;
6284         ret = i40e_set_filter_control(hw, &settings);
6285         if (ret)
6286                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6287                                                                 ret);
6288
6289         /* Update flow control according to the auto negotiation */
6290         i40e_update_flow_control(hw);
6291
6292         return I40E_SUCCESS;
6293 }
6294
6295 int
6296 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6297 {
6298         uint32_t reg;
6299         uint16_t j;
6300
6301         /**
6302          * Set or clear TX Queue Disable flags,
6303          * as required by the hardware.
6304          */
6305         i40e_pre_tx_queue_cfg(hw, q_idx, on);
6306         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6307
6308         /* Wait until the request is finished */
6309         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6310                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6311                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6312                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6313                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6314                                                         & 0x1))) {
6315                         break;
6316                 }
6317         }
6318         if (on) {
6319                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6320                         return I40E_SUCCESS; /* already on, skip next steps */
6321
6322                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6323                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6324         } else {
6325                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6326                         return I40E_SUCCESS; /* already off, skip next steps */
6327                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6328         }
6329         /* Write the register */
6330         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6331         /* Check the result */
6332         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6333                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6334                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6335                 if (on) {
6336                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6337                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
6338                                 break;
6339                 } else {
6340                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6341                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6342                                 break;
6343                 }
6344         }
6345         /* Check for timeout */
6346         if (j >= I40E_CHK_Q_ENA_COUNT) {
6347                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6348                             (on ? "enable" : "disable"), q_idx);
6349                 return I40E_ERR_TIMEOUT;
6350         }
6351
6352         return I40E_SUCCESS;
6353 }
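
/*
 * Note: the enable/disable handshake above (and the Rx variant
 * below) works on two bits of the queue-enable register: software
 * sets or clears QENA_REQ and hardware mirrors the new state into
 * QENA_STAT once the queue has actually transitioned, which is why
 * both polling loops wait until the two bits agree.
 */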
6354
6355 int
6356 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6357 {
6358         uint32_t reg;
6359         uint16_t j;
6360
6361         /* Wait until the request is finished */
6362         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6363                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6364                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6365                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6366                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
6367                         break;
6368         }
6369
6370         if (on) {
6371                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6372                         return I40E_SUCCESS; /* Already on, skip next steps */
6373                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6374         } else {
6375                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6376                         return I40E_SUCCESS; /* Already off, skip next steps */
6377                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6378         }
6379
6380         /* Write the register */
6381         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6382         /* Check the result */
6383         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6384                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6385                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6386                 if (on) {
6387                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6388                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
6389                                 break;
6390                 } else {
6391                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6392                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6393                                 break;
6394                 }
6395         }
6396
6397         /* Check for timeout */
6398         if (j >= I40E_CHK_Q_ENA_COUNT) {
6399                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6400                             (on ? "enable" : "disable"), q_idx);
6401                 return I40E_ERR_TIMEOUT;
6402         }
6403
6404         return I40E_SUCCESS;
6405 }
6406
6407 /* Initialize VSI for TX */
6408 static int
6409 i40e_dev_tx_init(struct i40e_pf *pf)
6410 {
6411         struct rte_eth_dev_data *data = pf->dev_data;
6412         uint16_t i;
6413         uint32_t ret = I40E_SUCCESS;
6414         struct i40e_tx_queue *txq;
6415
6416         for (i = 0; i < data->nb_tx_queues; i++) {
6417                 txq = data->tx_queues[i];
6418                 if (!txq || !txq->q_set)
6419                         continue;
6420                 ret = i40e_tx_queue_init(txq);
6421                 if (ret != I40E_SUCCESS)
6422                         break;
6423         }
6424         if (ret == I40E_SUCCESS)
6425                 i40e_set_tx_function(&rte_eth_devices[pf->dev_data->port_id]);
6426
6427         return ret;
6428 }
6429
6430 /* Initialize VSI for RX */
6431 static int
6432 i40e_dev_rx_init(struct i40e_pf *pf)
6433 {
6434         struct rte_eth_dev_data *data = pf->dev_data;
6435         int ret = I40E_SUCCESS;
6436         uint16_t i;
6437         struct i40e_rx_queue *rxq;
6438
6439         i40e_pf_config_rss(pf);
6440         for (i = 0; i < data->nb_rx_queues; i++) {
6441                 rxq = data->rx_queues[i];
6442                 if (!rxq || !rxq->q_set)
6443                         continue;
6444
6445                 ret = i40e_rx_queue_init(rxq);
6446                 if (ret != I40E_SUCCESS) {
6447                         PMD_DRV_LOG(ERR,
6448                                 "Failed to do RX queue initialization");
6449                         break;
6450                 }
6451         }
6452         if (ret == I40E_SUCCESS)
6453                 i40e_set_rx_function(&rte_eth_devices[pf->dev_data->port_id]);
6454
6455         return ret;
6456 }
6457
6458 static int
6459 i40e_dev_rxtx_init(struct i40e_pf *pf)
6460 {
6461         int err;
6462
6463         err = i40e_dev_tx_init(pf);
6464         if (err) {
6465                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6466                 return err;
6467         }
6468         err = i40e_dev_rx_init(pf);
6469         if (err) {
6470                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6471                 return err;
6472         }
6473
6474         return err;
6475 }
6476
6477 static int
6478 i40e_vmdq_setup(struct rte_eth_dev *dev)
6479 {
6480         struct rte_eth_conf *conf = &dev->data->dev_conf;
6481         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6482         int i, err, conf_vsis, j, loop;
6483         struct i40e_vsi *vsi;
6484         struct i40e_vmdq_info *vmdq_info;
6485         struct rte_eth_vmdq_rx_conf *vmdq_conf;
6486         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6487
6488         /*
6489          * Disable interrupts to avoid messages from VFs. This also avoids
6490          * race conditions during VSI creation/destruction.
6491          */
6492         i40e_pf_disable_irq0(hw);
6493
6494         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6495                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6496                 return -ENOTSUP;
6497         }
6498
6499         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6500         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6501                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max supported: %u",
6502                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6503                         pf->max_nb_vmdq_vsi);
6504                 return -ENOTSUP;
6505         }
6506
6507         if (pf->vmdq != NULL) {
6508                 PMD_INIT_LOG(INFO, "VMDQ already configured");
6509                 return 0;
6510         }
6511
6512         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6513                                 sizeof(*vmdq_info) * conf_vsis, 0);
6514
6515         if (pf->vmdq == NULL) {
6516                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6517                 return -ENOMEM;
6518         }
6519
6520         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6521
6522         /* Create VMDQ VSI */
6523         for (i = 0; i < conf_vsis; i++) {
6524                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6525                                 vmdq_conf->enable_loop_back);
6526                 if (vsi == NULL) {
6527                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6528                         err = -1;
6529                         goto err_vsi_setup;
6530                 }
6531                 vmdq_info = &pf->vmdq[i];
6532                 vmdq_info->pf = pf;
6533                 vmdq_info->vsi = vsi;
6534         }
6535         pf->nb_cfg_vmdq_vsi = conf_vsis;
6536
6537         /* Configure VLANs */
6538         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6539         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6540                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6541                         if (vmdq_conf->pool_map[i].pools & (1ULL << j)) {
6542                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6543                                         vmdq_conf->pool_map[i].vlan_id, j);
6544
6545                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6546                                                 vmdq_conf->pool_map[i].vlan_id);
6547                                 if (err) {
6548                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6549                                         err = -1;
6550                                         goto err_vsi_setup;
6551                                 }
6552                         }
6553                 }
6554         }
6555
6556         i40e_pf_enable_irq0(hw);
6557
6558         return 0;
6559
6560 err_vsi_setup:
6561         for (i = 0; i < conf_vsis; i++) {
6562                 if (pf->vmdq[i].vsi == NULL)
6563                         break;
6564                 i40e_vsi_release(pf->vmdq[i].vsi);
6565         }
6566
6567         rte_free(pf->vmdq);
6568         pf->vmdq = NULL;
6569         i40e_pf_enable_irq0(hw);
6570         return err;
6571 }
6572
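/*
 * Editor's note: the VLAN-to-pool mapping above walks a 64-bit pool
 * bitmask bit by bit and applies the VLAN to every pool whose bit is
 * set.  A small self-contained sketch of that idiom (hypothetical
 * names, relying only on headers already included at the top of this
 * file):
 */
#if 0   /* illustrative sketch only */
static void walk_pool_bitmap(uint64_t pools, uint16_t vlan_id, int nb_pools)
{
	int j;

	for (j = 0; j < nb_pools && j < 64; j++) {
		if (pools & (1ULL << j))
			printf("vlan %u -> pool %d\n", vlan_id, j);
	}
}
#endif
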
6573 static void
6574 i40e_stat_update_32(struct i40e_hw *hw,
6575                    uint32_t reg,
6576                    bool offset_loaded,
6577                    uint64_t *offset,
6578                    uint64_t *stat)
6579 {
6580         uint64_t new_data;
6581
6582         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6583         if (!offset_loaded)
6584                 *offset = new_data;
6585
6586         if (new_data >= *offset)
6587                 *stat = (uint64_t)(new_data - *offset);
6588         else
6589                 *stat = (uint64_t)((new_data +
6590                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6591 }
6592
6593 static void
6594 i40e_stat_update_48(struct i40e_hw *hw,
6595                    uint32_t hireg,
6596                    uint32_t loreg,
6597                    bool offset_loaded,
6598                    uint64_t *offset,
6599                    uint64_t *stat)
6600 {
6601         uint64_t new_data;
6602
6603         if (hw->device_id == I40E_DEV_ID_QEMU) {
6604                 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6605                 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6606                                 I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6607         } else {
6608                 new_data = I40E_READ_REG64(hw, loreg);
6609         }
6610
6611         if (!offset_loaded)
6612                 *offset = new_data;
6613
6614         if (new_data >= *offset)
6615                 *stat = new_data - *offset;
6616         else
6617                 *stat = (uint64_t)((new_data +
6618                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6619
6620         *stat &= I40E_48_BIT_MASK;
6621 }
6622
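/*
 * Editor's note: the two helpers above compute a delta against a saved
 * offset and handle counter rollover.  Worked example for the 48-bit
 * case: if the saved offset is 0xFFFFFFFFFFF0 and the register now
 * reads 0x10, the counter wrapped, so the delta is
 * (0x10 + 2^48 - 0xFFFFFFFFFFF0) & (2^48 - 1) = 0x20.  A standalone
 * sketch of the same arithmetic:
 */
#if 0   /* illustrative sketch only */
static uint64_t delta48(uint64_t new_data, uint64_t offset)
{
	uint64_t stat;

	if (new_data >= offset)
		stat = new_data - offset;
	else
		stat = new_data + (1ULL << 48) - offset;   /* rollover */
	return stat & ((1ULL << 48) - 1);
}
#endif
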
6623 /* Disable IRQ0 */
6624 void
6625 i40e_pf_disable_irq0(struct i40e_hw *hw)
6626 {
6627         /* Disable all interrupt types */
6628         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6629                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6630         I40E_WRITE_FLUSH(hw);
6631 }
6632
6633 /* Enable IRQ0 */
6634 void
6635 i40e_pf_enable_irq0(struct i40e_hw *hw)
6636 {
6637         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6638                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6639                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6640                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6641         I40E_WRITE_FLUSH(hw);
6642 }
6643
6644 static void
6645 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6646 {
6647         /* read pending request and disable first */
6648         i40e_pf_disable_irq0(hw);
6649         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6650         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6651                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6652
6653         if (no_queue)
6654                 /* Link no queues with irq0 */
6655                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6656                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6657 }
6658
6659 static void
6660 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6661 {
6662         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6663         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6664         int i;
6665         uint16_t abs_vf_id;
6666         uint32_t index, offset, val;
6667
6668         if (!pf->vfs)
6669                 return;
6670         /**
6671          * Try to find which VF triggered a reset; use the absolute VF id to
6672          * access the register, since it is a global register.
6673          */
6674         for (i = 0; i < pf->vf_num; i++) {
6675                 abs_vf_id = hw->func_caps.vf_base_id + i;
6676                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6677                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6678                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6679                 /* VFR event occurred */
6680                 if (val & (0x1 << offset)) {
6681                         int ret;
6682
6683                         /* Clear the event first */
6684                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6685                                                         (0x1 << offset));
6686                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6687                         /**
6688                          * Only notify the VF that a reset event occurred;
6689                          * don't trigger another SW reset.
6690                          */
6691                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6692                         if (ret != I40E_SUCCESS)
6693                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6694                 }
6695         }
6696 }
6697
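/*
 * Editor's note: the VFLR scan above locates a VF's bit inside an array
 * of 32-bit registers: index = id / 32 selects the register and
 * offset = id % 32 selects the bit within it.  E.g. absolute VF id 45
 * lands in register 1, bit 13.  A self-contained sketch:
 */
#if 0   /* illustrative sketch only */
static bool vf_bit_is_set(const uint32_t *regs, uint16_t abs_vf_id)
{
	uint32_t index = abs_vf_id / 32;    /* which 32-bit register */
	uint32_t offset = abs_vf_id % 32;   /* which bit inside it */

	return (regs[index] & (1U << offset)) != 0;
}
#endif
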
6698 static void
6699 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6700 {
6701         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6702         int i;
6703
6704         for (i = 0; i < pf->vf_num; i++)
6705                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6706 }
6707
6708 static void
6709 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6710 {
6711         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6712         struct i40e_arq_event_info info;
6713         uint16_t pending, opcode;
6714         int ret;
6715
6716         info.buf_len = I40E_AQ_BUF_SZ;
6717         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6718         if (!info.msg_buf) {
6719                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6720                 return;
6721         }
6722
6723         pending = 1;
6724         while (pending) {
6725                 ret = i40e_clean_arq_element(hw, &info, &pending);
6726
6727                 if (ret != I40E_SUCCESS) {
6728                         PMD_DRV_LOG(INFO,
6729                                 "Failed to read msg from AdminQ, aq_err: %u",
6730                                 hw->aq.asq_last_status);
6731                         break;
6732                 }
6733                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6734
6735                 switch (opcode) {
6736                 case i40e_aqc_opc_send_msg_to_pf:
6737                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6738                         i40e_pf_host_handle_vf_msg(dev,
6739                                         rte_le_to_cpu_16(info.desc.retval),
6740                                         rte_le_to_cpu_32(info.desc.cookie_high),
6741                                         rte_le_to_cpu_32(info.desc.cookie_low),
6742                                         info.msg_buf,
6743                                         info.msg_len);
6744                         break;
6745                 case i40e_aqc_opc_get_link_status:
6746                         ret = i40e_dev_link_update(dev, 0);
6747                         if (!ret)
6748                                 rte_eth_dev_callback_process(dev,
6749                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6750                         break;
6751                 default:
6752                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6753                                     opcode);
6754                         break;
6755                 }
6756         }
6757         rte_free(info.msg_buf);
6758 }
6759
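/*
 * Editor's note: the handler above drains the admin receive queue by
 * looping while the 'pending' count returned by i40e_clean_arq_element()
 * stays non-zero, dispatching on each descriptor's opcode.  A minimal
 * sketch of the drain idiom (fetch_event() and handle_event() are
 * hypothetical stand-ins, not driver functions):
 */
#if 0   /* illustrative sketch only */
extern int fetch_event(uint16_t *pending);   /* hypothetical */
extern void handle_event(int ev);            /* hypothetical */

static void drain_events(void)
{
	uint16_t pending = 1;

	while (pending) {
		int ev = fetch_event(&pending);

		if (ev < 0)
			break;          /* queue error: stop draining */
		handle_event(ev);
	}
}
#endif
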
6760 static void
6761 i40e_handle_mdd_event(struct rte_eth_dev *dev)
6762 {
6763 #define I40E_MDD_CLEAR32 0xFFFFFFFF
6764 #define I40E_MDD_CLEAR16 0xFFFF
6765         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6766         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6767         bool mdd_detected = false;
6768         struct i40e_pf_vf *vf;
6769         uint32_t reg;
6770         int i;
6771
6772         /* find what triggered the MDD event */
6773         reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
6774         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6775                 uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6776                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6777                 uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6778                                 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6779                 uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6780                                 I40E_GL_MDET_TX_EVENT_SHIFT;
6781                 uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6782                                 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6783                                         hw->func_caps.base_queue;
6784                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
6785                         "queue %d PF number 0x%02x VF number 0x%02x device %s\n",
6786                                 event, queue, pf_num, vf_num, dev->data->name);
6787                 I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
6788                 mdd_detected = true;
6789         }
6790         reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
6791         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6792                 uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6793                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6794                 uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6795                                 I40E_GL_MDET_RX_EVENT_SHIFT;
6796                 uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6797                                 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6798                                         hw->func_caps.base_queue;
6799
6800                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
6801                                 "queue %d of function 0x%02x device %s\n",
6802                                         event, queue, func, dev->data->name);
6803                 I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
6804                 mdd_detected = true;
6805         }
6806
6807         if (mdd_detected) {
6808                 reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
6809                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6810                         I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
6811                         PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n");
6812                 }
6813                 reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
6814                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6815                         I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
6816                                         I40E_MDD_CLEAR16);
6817                         PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n");
6818                 }
6819         }
6820
6821         /* see if one of the VFs needs its hand slapped */
6822         for (i = 0; i < pf->vf_num && mdd_detected; i++) {
6823                 vf = &pf->vfs[i];
6824                 reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
6825                 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6826                         I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
6827                                         I40E_MDD_CLEAR16);
6828                         vf->num_mdd_events++;
6829                         PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %"
6830                                         PRIu64 " times\n",
6831                                         i, vf->num_mdd_events);
6832                 }
6833
6834                 reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
6835                 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6836                         I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
6837                                         I40E_MDD_CLEAR16);
6838                         vf->num_mdd_events++;
6839                         PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %"
6840                                         PRIu64 " times\n",
6841                                         i, vf->num_mdd_events);
6842                 }
6843         }
6844 }
6845
6846 /**
6847  * Interrupt handler triggered by the NIC for handling
6848  * a specific interrupt.
6849  *
6850  * @param handle
6851  *  Pointer to interrupt handle.
6852  * @param param
6853  *  The address of the parameter (struct rte_eth_dev *) registered before.
6854  *
6855  * @return
6856  *  void
6857  */
6858 static void
6859 i40e_dev_interrupt_handler(void *param)
6860 {
6861         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6862         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6863         uint32_t icr0;
6864
6865         /* Disable interrupt */
6866         i40e_pf_disable_irq0(hw);
6867
6868         /* read out interrupt causes */
6869         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6870
6871         /* No interrupt event indicated */
6872         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6873                 PMD_DRV_LOG(INFO, "No interrupt event");
6874                 goto done;
6875         }
6876         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6877                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6878         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6879                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6880                 i40e_handle_mdd_event(dev);
6881         }
6882         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6883                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6884         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6885                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6886         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6887                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6888         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6889                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6890         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6891                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6892
6893         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6894                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6895                 i40e_dev_handle_vfr_event(dev);
6896         }
6897         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6898                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6899                 i40e_dev_handle_aq_msg(dev);
6900         }
6901
6902 done:
6903         /* Enable interrupt */
6904         i40e_pf_enable_irq0(hw);
6905 }
6906
6907 static void
6908 i40e_dev_alarm_handler(void *param)
6909 {
6910         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6911         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6912         uint32_t icr0;
6913
6914         /* Disable interrupt */
6915         i40e_pf_disable_irq0(hw);
6916
6917         /* read out interrupt causes */
6918         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6919
6920         /* No interrupt event indicated */
6921         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
6922                 goto done;
6923         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6924                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6925         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6926                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6927                 i40e_handle_mdd_event(dev);
6928         }
6929         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6930                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6931         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6932                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6933         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6934                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6935         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6936                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6937         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6938                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6939
6940         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6941                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6942                 i40e_dev_handle_vfr_event(dev);
6943         }
6944         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6945                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6946                 i40e_dev_handle_aq_msg(dev);
6947         }
6948
6949 done:
6950         /* Enable interrupt */
6951         i40e_pf_enable_irq0(hw);
6952         rte_eal_alarm_set(I40E_ALARM_INTERVAL,
6953                           i40e_dev_alarm_handler, dev);
6954 }
6955
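/*
 * Editor's note: i40e_dev_alarm_handler() above implements a polled
 * interrupt mode: each invocation re-arms itself via rte_eal_alarm_set(),
 * because EAL alarms are one-shot.  A minimal sketch of a self-rearming
 * alarm (do_work() and the period constant are assumed placeholders):
 */
#if 0   /* illustrative sketch only */
extern void do_work(void *arg);              /* hypothetical */
#define SKETCH_PERIOD_US 50000               /* assumed period */

static void periodic_cb(void *arg)
{
	do_work(arg);
	/* EAL alarms fire once, so re-arm for the next period */
	rte_eal_alarm_set(SKETCH_PERIOD_US, periodic_cb, arg);
}
/* kick off once: rte_eal_alarm_set(SKETCH_PERIOD_US, periodic_cb, arg); */
#endif
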
6956 int
6957 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6958                          struct i40e_macvlan_filter *filter,
6959                          int total)
6960 {
6961         int ele_num, ele_buff_size;
6962         int num, actual_num, i;
6963         uint16_t flags;
6964         int ret = I40E_SUCCESS;
6965         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6966         struct i40e_aqc_add_macvlan_element_data *req_list;
6967
6968         if (filter == NULL || total == 0)
6969                 return I40E_ERR_PARAM;
6970         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6971         ele_buff_size = hw->aq.asq_buf_size;
6972
6973         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6974         if (req_list == NULL) {
6975                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6976                 return I40E_ERR_NO_MEMORY;
6977         }
6978
6979         num = 0;
6980         do {
6981                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6982                 memset(req_list, 0, ele_buff_size);
6983
6984                 for (i = 0; i < actual_num; i++) {
6985                         rte_memcpy(req_list[i].mac_addr,
6986                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6987                         req_list[i].vlan_tag =
6988                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6989
6990                         switch (filter[num + i].filter_type) {
6991                         case I40E_MAC_PERFECT_MATCH:
6992                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6993                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6994                                 break;
6995                         case I40E_MACVLAN_PERFECT_MATCH:
6996                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6997                                 break;
6998                         case I40E_MAC_HASH_MATCH:
6999                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
7000                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
7001                                 break;
7002                         case I40E_MACVLAN_HASH_MATCH:
7003                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
7004                                 break;
7005                         default:
7006                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
7007                                 ret = I40E_ERR_PARAM;
7008                                 goto DONE;
7009                         }
7010
7011                         req_list[i].queue_number = 0;
7012
7013                         req_list[i].flags = rte_cpu_to_le_16(flags);
7014                 }
7015
7016                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
7017                                                 actual_num, NULL);
7018                 if (ret != I40E_SUCCESS) {
7019                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
7020                         goto DONE;
7021                 }
7022                 num += actual_num;
7023         } while (num < total);
7024
7025 DONE:
7026         rte_free(req_list);
7027         return ret;
7028 }
7029
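/*
 * Editor's note: the add path above and the remove path below both split
 * a large filter list into chunks sized to the AdminQ buffer
 * (ele_num = asq_buf_size / element size) and issue one command per
 * chunk.  Self-contained sketch of the chunking arithmetic:
 */
#if 0   /* illustrative sketch only */
static void process_in_chunks(int total, int buf_size, int elem_size)
{
	int ele_num = buf_size / elem_size;   /* elements per command */
	int num = 0;

	while (num < total) {
		int actual = (num + ele_num > total) ? (total - num) : ele_num;

		/* submit elements [num, num + actual) in one command */
		printf("chunk: %d..%d\n", num, num + actual - 1);
		num += actual;
	}
}
#endif
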
7030 int
7031 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
7032                             struct i40e_macvlan_filter *filter,
7033                             int total)
7034 {
7035         int ele_num, ele_buff_size;
7036         int num, actual_num, i;
7037         uint16_t flags;
7038         int ret = I40E_SUCCESS;
7039         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7040         struct i40e_aqc_remove_macvlan_element_data *req_list;
7041
7042         if (filter == NULL || total == 0)
7043                 return I40E_ERR_PARAM;
7044
7045         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7046         ele_buff_size = hw->aq.asq_buf_size;
7047
7048         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
7049         if (req_list == NULL) {
7050                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
7051                 return I40E_ERR_NO_MEMORY;
7052         }
7053
7054         num = 0;
7055         do {
7056                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7057                 memset(req_list, 0, ele_buff_size);
7058
7059                 for (i = 0; i < actual_num; i++) {
7060                         rte_memcpy(req_list[i].mac_addr,
7061                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
7062                         req_list[i].vlan_tag =
7063                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
7064
7065                         switch (filter[num + i].filter_type) {
7066                         case I40E_MAC_PERFECT_MATCH:
7067                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
7068                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7069                                 break;
7070                         case I40E_MACVLAN_PERFECT_MATCH:
7071                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7072                                 break;
7073                         case I40E_MAC_HASH_MATCH:
7074                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
7075                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7076                                 break;
7077                         case I40E_MACVLAN_HASH_MATCH:
7078                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
7079                                 break;
7080                         default:
7081                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
7082                                 ret = I40E_ERR_PARAM;
7083                                 goto DONE;
7084                         }
7085                         req_list[i].flags = rte_cpu_to_le_16(flags);
7086                 }
7087
7088                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
7089                                                 actual_num, NULL);
7090                 if (ret != I40E_SUCCESS) {
7091                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7092                         goto DONE;
7093                 }
7094                 num += actual_num;
7095         } while (num < total);
7096
7097 DONE:
7098         rte_free(req_list);
7099         return ret;
7100 }
7101
7102 /* Find out specific MAC filter */
7103 static struct i40e_mac_filter *
7104 i40e_find_mac_filter(struct i40e_vsi *vsi,
7105                          struct rte_ether_addr *macaddr)
7106 {
7107         struct i40e_mac_filter *f;
7108
7109         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7110                 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7111                         return f;
7112         }
7113
7114         return NULL;
7115 }
7116
7117 static bool
7118 i40e_find_vlan_filter(struct i40e_vsi *vsi,
7119                          uint16_t vlan_id)
7120 {
7121         uint32_t vid_idx, vid_bit;
7122
7123         if (vlan_id > ETH_VLAN_ID_MAX)
7124                 return 0;
7125
7126         vid_idx = I40E_VFTA_IDX(vlan_id);
7127         vid_bit = I40E_VFTA_BIT(vlan_id);
7128
7129         if (vsi->vfta[vid_idx] & vid_bit)
7130                 return 1;
7131         else
7132                 return 0;
7133 }
7134
7135 static void
7136 i40e_store_vlan_filter(struct i40e_vsi *vsi,
7137                        uint16_t vlan_id, bool on)
7138 {
7139         uint32_t vid_idx, vid_bit;
7140
7141         vid_idx = I40E_VFTA_IDX(vlan_id);
7142         vid_bit = I40E_VFTA_BIT(vlan_id);
7143
7144         if (on)
7145                 vsi->vfta[vid_idx] |= vid_bit;
7146         else
7147                 vsi->vfta[vid_idx] &= ~vid_bit;
7148 }
7149
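/*
 * Editor's note: the VFTA helpers above keep a software shadow of the
 * VLAN filter table as a bitmap of 32-bit words.  Assuming the usual
 * I40E_VFTA_IDX(v) == v >> 5 and I40E_VFTA_BIT(v) == 1 << (v & 0x1F)
 * layout, VLAN 100 maps to word 3, bit 4.  Standalone sketch under that
 * assumption:
 */
#if 0   /* illustrative sketch only */
static void vfta_set(uint32_t *vfta, uint16_t vlan_id, bool on)
{
	uint32_t idx = vlan_id >> 5;           /* word index */
	uint32_t bit = 1U << (vlan_id & 0x1F); /* bit within the word */

	if (on)
		vfta[idx] |= bit;
	else
		vfta[idx] &= ~bit;
}
#endif
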
7150 void
7151 i40e_set_vlan_filter(struct i40e_vsi *vsi,
7152                      uint16_t vlan_id, bool on)
7153 {
7154         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7155         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
7156         int ret;
7157
7158         if (vlan_id > ETH_VLAN_ID_MAX)
7159                 return;
7160
7161         i40e_store_vlan_filter(vsi, vlan_id, on);
7162
7163         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
7164                 return;
7165
7166         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
7167
7168         if (on) {
7169                 ret = i40e_aq_add_vlan(hw, vsi->seid,
7170                                        &vlan_data, 1, NULL);
7171                 if (ret != I40E_SUCCESS)
7172                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
7173         } else {
7174                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
7175                                           &vlan_data, 1, NULL);
7176                 if (ret != I40E_SUCCESS)
7177                         PMD_DRV_LOG(ERR,
7178                                     "Failed to remove vlan filter");
7179         }
7180 }
7181
7182 /**
7183  * Find all VLAN options for a specific MAC addr,
7184  * returning the actual VLANs found.
7185  */
7186 int
7187 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7188                            struct i40e_macvlan_filter *mv_f,
7189                            int num, struct rte_ether_addr *addr)
7190 {
7191         int i;
7192         uint32_t j, k;
7193
7194         /**
7195          * Avoid calling i40e_find_vlan_filter here to reduce loop time,
7196          * even though it makes the code look more complex.
7197          */
7198         if (num < vsi->vlan_num)
7199                 return I40E_ERR_PARAM;
7200
7201         i = 0;
7202         for (j = 0; j < I40E_VFTA_SIZE; j++) {
7203                 if (vsi->vfta[j]) {
7204                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7205                                 if (vsi->vfta[j] & (1U << k)) {
7206                                         if (i > num - 1) {
7207                                                 PMD_DRV_LOG(ERR,
7208                                                         "vlan number doesn't match");
7209                                                 return I40E_ERR_PARAM;
7210                                         }
7211                                         rte_memcpy(&mv_f[i].macaddr,
7212                                                         addr, ETH_ADDR_LEN);
7213                                         mv_f[i].vlan_id =
7214                                                 j * I40E_UINT32_BIT_SIZE + k;
7215                                         i++;
7216                                 }
7217                         }
7218                 }
7219         }
7220         return I40E_SUCCESS;
7221 }
7222
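/*
 * Editor's note: the scan above inverts the VFTA bitmap back into VLAN
 * ids: word j, bit k corresponds to vlan_id = j * 32 + k (e.g. word 3,
 * bit 4 -> VLAN 100).  Self-contained sketch of the inversion:
 */
#if 0   /* illustrative sketch only */
static int bitmap_to_vlans(const uint32_t *vfta, size_t words,
			   uint16_t *out, int max)
{
	int n = 0;
	size_t j;
	uint32_t k;

	for (j = 0; j < words; j++) {
		if (!vfta[j])
			continue;               /* skip empty words fast */
		for (k = 0; k < 32; k++) {
			if (!(vfta[j] & (1U << k)))
				continue;
			if (n >= max)
				return -1;      /* caller buffer too small */
			out[n++] = (uint16_t)(j * 32 + k);
		}
	}
	return n;
}
#endif
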
7223 static inline int
7224 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7225                            struct i40e_macvlan_filter *mv_f,
7226                            int num,
7227                            uint16_t vlan)
7228 {
7229         int i = 0;
7230         struct i40e_mac_filter *f;
7231
7232         if (num < vsi->mac_num)
7233                 return I40E_ERR_PARAM;
7234
7235         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7236                 if (i > num - 1) {
7237                         PMD_DRV_LOG(ERR, "buffer number doesn't match");
7238                         return I40E_ERR_PARAM;
7239                 }
7240                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7241                                 ETH_ADDR_LEN);
7242                 mv_f[i].vlan_id = vlan;
7243                 mv_f[i].filter_type = f->mac_info.filter_type;
7244                 i++;
7245         }
7246
7247         return I40E_SUCCESS;
7248 }
7249
7250 static int
7251 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7252 {
7253         int i, j, num;
7254         struct i40e_mac_filter *f;
7255         struct i40e_macvlan_filter *mv_f;
7256         int ret = I40E_SUCCESS;
7257
7258         if (vsi == NULL || vsi->mac_num == 0)
7259                 return I40E_ERR_PARAM;
7260
7261         /* Case that no vlan is set */
7262         if (vsi->vlan_num == 0)
7263                 num = vsi->mac_num;
7264         else
7265                 num = vsi->mac_num * vsi->vlan_num;
7266
7267         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7268         if (mv_f == NULL) {
7269                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7270                 return I40E_ERR_NO_MEMORY;
7271         }
7272
7273         i = 0;
7274         if (vsi->vlan_num == 0) {
7275                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7276                         rte_memcpy(&mv_f[i].macaddr,
7277                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
7278                         mv_f[i].filter_type = f->mac_info.filter_type;
7279                         mv_f[i].vlan_id = 0;
7280                         i++;
7281                 }
7282         } else {
7283                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7284                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
7285                                         vsi->vlan_num, &f->mac_info.mac_addr);
7286                         if (ret != I40E_SUCCESS)
7287                                 goto DONE;
7288                         for (j = i; j < i + vsi->vlan_num; j++)
7289                                 mv_f[j].filter_type = f->mac_info.filter_type;
7290                         i += vsi->vlan_num;
7291                 }
7292         }
7293
7294         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7295 DONE:
7296         rte_free(mv_f);
7297
7298         return ret;
7299 }
7300
7301 int
7302 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7303 {
7304         struct i40e_macvlan_filter *mv_f;
7305         int mac_num;
7306         int ret = I40E_SUCCESS;
7307
7308         if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7309                 return I40E_ERR_PARAM;
7310
7311         /* If it's already set, just return */
7312         if (i40e_find_vlan_filter(vsi, vlan))
7313                 return I40E_SUCCESS;
7314
7315         mac_num = vsi->mac_num;
7316
7317         if (mac_num == 0) {
7318                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7319                 return I40E_ERR_PARAM;
7320         }
7321
7322         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7323
7324         if (mv_f == NULL) {
7325                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7326                 return I40E_ERR_NO_MEMORY;
7327         }
7328
7329         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7330
7331         if (ret != I40E_SUCCESS)
7332                 goto DONE;
7333
7334         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7335
7336         if (ret != I40E_SUCCESS)
7337                 goto DONE;
7338
7339         i40e_set_vlan_filter(vsi, vlan, 1);
7340
7341         vsi->vlan_num++;
7342         ret = I40E_SUCCESS;
7343 DONE:
7344         rte_free(mv_f);
7345         return ret;
7346 }
7347
7348 int
7349 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7350 {
7351         struct i40e_macvlan_filter *mv_f;
7352         int mac_num;
7353         int ret = I40E_SUCCESS;
7354
7355         /**
7356          * Vlan 0 is the generic filter for untagged packets
7357          * and can't be removed.
7358          */
7359         if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
7360                 return I40E_ERR_PARAM;
7361
7362         /* If we can't find it, just return */
7363         if (!i40e_find_vlan_filter(vsi, vlan))
7364                 return I40E_ERR_PARAM;
7365
7366         mac_num = vsi->mac_num;
7367
7368         if (mac_num == 0) {
7369                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7370                 return I40E_ERR_PARAM;
7371         }
7372
7373         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7374
7375         if (mv_f == NULL) {
7376                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7377                 return I40E_ERR_NO_MEMORY;
7378         }
7379
7380         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7381
7382         if (ret != I40E_SUCCESS)
7383                 goto DONE;
7384
7385         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7386
7387         if (ret != I40E_SUCCESS)
7388                 goto DONE;
7389
7390         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
7391         if (vsi->vlan_num == 1) {
7392                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7393                 if (ret != I40E_SUCCESS)
7394                         goto DONE;
7395
7396                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7397                 if (ret != I40E_SUCCESS)
7398                         goto DONE;
7399         }
7400
7401         i40e_set_vlan_filter(vsi, vlan, 0);
7402
7403         vsi->vlan_num--;
7404         ret = I40E_SUCCESS;
7405 DONE:
7406         rte_free(mv_f);
7407         return ret;
7408 }
7409
7410 int
7411 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7412 {
7413         struct i40e_mac_filter *f;
7414         struct i40e_macvlan_filter *mv_f;
7415         int i, vlan_num = 0;
7416         int ret = I40E_SUCCESS;
7417
7418         /* If the MAC filter is already configured, just return */
7419         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7420         if (f != NULL)
7421                 return I40E_SUCCESS;
7422         if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7423                 mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7424
7425                 /**
7426                  * If vlan_num is 0, this is the first time to add a mac;
7427                  * set the mask for vlan_id 0.
7428                  */
7429                 if (vsi->vlan_num == 0) {
7430                         i40e_set_vlan_filter(vsi, 0, 1);
7431                         vsi->vlan_num = 1;
7432                 }
7433                 vlan_num = vsi->vlan_num;
7434         } else if (mac_filter->filter_type == I40E_MAC_PERFECT_MATCH ||
7435                         mac_filter->filter_type == I40E_MAC_HASH_MATCH)
7436                 vlan_num = 1;
7437
7438         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7439         if (mv_f == NULL) {
7440                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7441                 return I40E_ERR_NO_MEMORY;
7442         }
7443
7444         for (i = 0; i < vlan_num; i++) {
7445                 mv_f[i].filter_type = mac_filter->filter_type;
7446                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7447                                 ETH_ADDR_LEN);
7448         }
7449
7450         if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7451                 mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7452                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7453                                         &mac_filter->mac_addr);
7454                 if (ret != I40E_SUCCESS)
7455                         goto DONE;
7456         }
7457
7458         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7459         if (ret != I40E_SUCCESS)
7460                 goto DONE;
7461
7462         /* Add the mac addr into mac list */
7463         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7464         if (f == NULL) {
7465                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7466                 ret = I40E_ERR_NO_MEMORY;
7467                 goto DONE;
7468         }
7469         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7470                         ETH_ADDR_LEN);
7471         f->mac_info.filter_type = mac_filter->filter_type;
7472         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7473         vsi->mac_num++;
7474
7475         ret = I40E_SUCCESS;
7476 DONE:
7477         rte_free(mv_f);
7478
7479         return ret;
7480 }
7481
7482 int
7483 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
7484 {
7485         struct i40e_mac_filter *f;
7486         struct i40e_macvlan_filter *mv_f;
7487         int i, vlan_num;
7488         enum i40e_mac_filter_type filter_type;
7489         int ret = I40E_SUCCESS;
7490
7491         /* If we can't find it, return an error */
7492         f = i40e_find_mac_filter(vsi, addr);
7493         if (f == NULL)
7494                 return I40E_ERR_PARAM;
7495
7496         vlan_num = vsi->vlan_num;
7497         filter_type = f->mac_info.filter_type;
7498         if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7499                 filter_type == I40E_MACVLAN_HASH_MATCH) {
7500                 if (vlan_num == 0) {
7501                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7502                         return I40E_ERR_PARAM;
7503                 }
7504         } else if (filter_type == I40E_MAC_PERFECT_MATCH ||
7505                         filter_type == I40E_MAC_HASH_MATCH)
7506                 vlan_num = 1;
7507
7508         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7509         if (mv_f == NULL) {
7510                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7511                 return I40E_ERR_NO_MEMORY;
7512         }
7513
7514         for (i = 0; i < vlan_num; i++) {
7515                 mv_f[i].filter_type = filter_type;
7516                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7517                                 ETH_ADDR_LEN);
7518         }
7519         if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7520                         filter_type == I40E_MACVLAN_HASH_MATCH) {
7521                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7522                 if (ret != I40E_SUCCESS)
7523                         goto DONE;
7524         }
7525
7526         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7527         if (ret != I40E_SUCCESS)
7528                 goto DONE;
7529
7530         /* Remove the mac addr from the mac list */
7531         TAILQ_REMOVE(&vsi->mac_list, f, next);
7532         rte_free(f);
7533         vsi->mac_num--;
7534
7535         ret = I40E_SUCCESS;
7536 DONE:
7537         rte_free(mv_f);
7538         return ret;
7539 }
7540
7541 /* Configure hash enable flags for RSS */
7542 uint64_t
7543 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7544 {
7545         uint64_t hena = 0;
7546         int i;
7547
7548         if (!flags)
7549                 return hena;
7550
7551         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7552                 if (flags & (1ULL << i))
7553                         hena |= adapter->pctypes_tbl[i];
7554         }
7555
7556         return hena;
7557 }
7558
7559 /* Parse the hash enable flags */
7560 uint64_t
7561 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7562 {
7563         uint64_t rss_hf = 0;
7564         int i;
7565
7566         if (!flags)
7567                 return rss_hf;
7568
7569         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7570                 if (flags & adapter->pctypes_tbl[i])
7571                         rss_hf |= (1ULL << i);
7572         }
7573         return rss_hf;
7574 }
7575
7576 /* Disable RSS */
7577 void
7578 i40e_pf_disable_rss(struct i40e_pf *pf)
7579 {
7580         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7581
7582         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7583         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7584         I40E_WRITE_FLUSH(hw);
7585 }
7586
7587 int
7588 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7589 {
7590         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7591         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7592         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7593                            I40E_VFQF_HKEY_MAX_INDEX :
7594                            I40E_PFQF_HKEY_MAX_INDEX;
7595
7596         if (!key || key_len == 0) {
7597                 PMD_DRV_LOG(DEBUG, "No key to be configured");
7598                 return 0;
7599         } else if (key_len != (key_idx + 1) *
7600                 sizeof(uint32_t)) {
7601                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7602                 return -EINVAL;
7603         }
7604
7605         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7606                 struct i40e_aqc_get_set_rss_key_data *key_dw =
7607                                 (struct i40e_aqc_get_set_rss_key_data *)key;
7608                 enum i40e_status_code status =
7609                                 i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7610
7611                 if (status) {
7612                         PMD_DRV_LOG(ERR,
7613                                     "Failed to configure RSS key via AQ, error status: %d",
7614                                     status);
7615                         return -EIO;
7616                 }
7617         } else {
7618                 uint32_t *hash_key = (uint32_t *)key;
7619                 uint16_t i;
7620
7621                 if (vsi->type == I40E_VSI_SRIOV) {
7622                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7623                                 I40E_WRITE_REG(
7624                                         hw,
7625                                         I40E_VFQF_HKEY1(i, vsi->user_param),
7626                                         hash_key[i]);
7627
7628                 } else {
7629                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7630                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7631                                                hash_key[i]);
7632                 }
7633                 I40E_WRITE_FLUSH(hw);
7634         }
7635
7636         return 0;
7637 }
7638
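/*
 * Editor's note: the RSS key length check above requires exactly
 * (max_index + 1) * sizeof(uint32_t) bytes, i.e. one 32-bit key register
 * per key word.  Assuming I40E_PFQF_HKEY_MAX_INDEX is 12 on this
 * hardware family, that is 13 * 4 = 52 bytes for the PF case.  Sketch:
 */
#if 0   /* illustrative sketch only */
static int key_len_is_valid(uint8_t key_len, uint16_t max_index)
{
	/* one uint32_t register per key word, indices 0..max_index */
	return key_len == (uint8_t)((max_index + 1) * sizeof(uint32_t));
}
/* e.g. max_index == 12 -> a 52-byte key is required */
#endif
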
7639 static int
7640 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7641 {
7642         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7643         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7644         uint32_t reg;
7645         int ret;
7646
7647         if (!key || !key_len)
7648                 return 0;
7649
7650         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7651                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7652                         (struct i40e_aqc_get_set_rss_key_data *)key);
7653                 if (ret) {
7654                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7655                         return ret;
7656                 }
7657         } else {
7658                 uint32_t *key_dw = (uint32_t *)key;
7659                 uint16_t i;
7660
7661                 if (vsi->type == I40E_VSI_SRIOV) {
7662                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7663                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7664                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7665                         }
7666                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7667                                    sizeof(uint32_t);
7668                 } else {
7669                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7670                                 reg = I40E_PFQF_HKEY(i);
7671                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7672                         }
7673                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7674                                    sizeof(uint32_t);
7675                 }
7676         }
7677         return 0;
7678 }
7679
7680 static int
7681 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7682 {
7683         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7684         uint64_t hena;
7685         int ret;
7686
7687         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7688                                rss_conf->rss_key_len);
7689         if (ret)
7690                 return ret;
7691
7692         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7693         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7694         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7695         I40E_WRITE_FLUSH(hw);
7696
7697         return 0;
7698 }
7699
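/*
 * Editor's note: the 64-bit hash-enable (HENA) value above is stored
 * across two 32-bit registers, low word in PFQF_HENA(0) and high word in
 * PFQF_HENA(1); the conf_get path below rebuilds it the same way.
 * Sketch of the split/rebuild arithmetic:
 */
#if 0   /* illustrative sketch only */
static void split_u64(uint64_t hena, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)hena;           /* bits 31..0  */
	*hi = (uint32_t)(hena >> 32);   /* bits 63..32 */
}

static uint64_t join_u64(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}
#endif
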
7700 static int
7701 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7702                          struct rte_eth_rss_conf *rss_conf)
7703 {
7704         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7705         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7706         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7707         uint64_t hena;
7708
7709         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7710         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7711
7712         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7713                 if (rss_hf != 0) /* Enable RSS */
7714                         return -EINVAL;
7715                 return 0; /* Nothing to do */
7716         }
7717         /* RSS enabled */
7718         if (rss_hf == 0) /* Disable RSS */
7719                 return -EINVAL;
7720
7721         return i40e_hw_rss_hash_set(pf, rss_conf);
7722 }
7723
7724 static int
7725 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7726                            struct rte_eth_rss_conf *rss_conf)
7727 {
7728         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7729         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7730         uint64_t hena;
7731         int ret;
7732
7733         if (!rss_conf)
7734                 return -EINVAL;
7735
7736         ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7737                          &rss_conf->rss_key_len);
7738         if (ret)
7739                 return ret;
7740
7741         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7742         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7743         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7744
7745         return 0;
7746 }
7747
7748 static int
7749 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7750 {
7751         switch (filter_type) {
7752         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7753                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7754                 break;
7755         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7756                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7757                 break;
7758         case RTE_TUNNEL_FILTER_IMAC_TENID:
7759                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7760                 break;
7761         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7762                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7763                 break;
7764         case ETH_TUNNEL_FILTER_IMAC:
7765                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7766                 break;
7767         case ETH_TUNNEL_FILTER_OIP:
7768                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7769                 break;
7770         case ETH_TUNNEL_FILTER_IIP:
7771                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7772                 break;
7773         default:
7774                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7775                 return -EINVAL;
7776         }
7777
7778         return 0;
7779 }
7780
7781 /* Convert tunnel filter structure */
7782 static int
7783 i40e_tunnel_filter_convert(
7784         struct i40e_aqc_cloud_filters_element_bb *cld_filter,
7785         struct i40e_tunnel_filter *tunnel_filter)
7786 {
7787         rte_ether_addr_copy((struct rte_ether_addr *)
7788                         &cld_filter->element.outer_mac,
7789                 (struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
7790         rte_ether_addr_copy((struct rte_ether_addr *)
7791                         &cld_filter->element.inner_mac,
7792                 (struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
7793         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7794         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7795              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7796             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7797                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7798         else
7799                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7800         tunnel_filter->input.flags = cld_filter->element.flags;
7801         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7802         tunnel_filter->queue = cld_filter->element.queue_number;
7803         rte_memcpy(tunnel_filter->input.general_fields,
7804                    cld_filter->general_fields,
7805                    sizeof(cld_filter->general_fields));
7806
7807         return 0;
7808 }
7809
7810 /* Check if the tunnel filter already exists */
7811 struct i40e_tunnel_filter *
7812 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7813                              const struct i40e_tunnel_filter_input *input)
7814 {
7815         int ret;
7816
7817         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7818         if (ret < 0)
7819                 return NULL;
7820
7821         return tunnel_rule->hash_map[ret];
7822 }
7823
7824 /* Add a tunnel filter into the SW list */
7825 static int
7826 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7827                              struct i40e_tunnel_filter *tunnel_filter)
7828 {
7829         struct i40e_tunnel_rule *rule = &pf->tunnel;
7830         int ret;
7831
7832         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7833         if (ret < 0) {
7834                 PMD_DRV_LOG(ERR,
7835                             "Failed to insert tunnel filter into hash table %d!",
7836                             ret);
7837                 return ret;
7838         }
7839         rule->hash_map[ret] = tunnel_filter;
7840
7841         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7842
7843         return 0;
7844 }
7845
7846 /* Delete a tunnel filter from the SW list */
7847 int
7848 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7849                           struct i40e_tunnel_filter_input *input)
7850 {
7851         struct i40e_tunnel_rule *rule = &pf->tunnel;
7852         struct i40e_tunnel_filter *tunnel_filter;
7853         int ret;
7854
7855         ret = rte_hash_del_key(rule->hash_table, input);
7856         if (ret < 0) {
7857                 PMD_DRV_LOG(ERR,
7858                             "Failed to delete tunnel filter from hash table %d!",
7859                             ret);
7860                 return ret;
7861         }
7862         tunnel_filter = rule->hash_map[ret];
7863         rule->hash_map[ret] = NULL;
7864
7865         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7866         rte_free(tunnel_filter);
7867
7868         return 0;
7869 }
7870
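/*
 * Editor's note: the tunnel-filter SW list above pairs an rte_hash
 * (key -> slot index) with a plain array (hash_map) holding the object
 * for each slot, since rte_hash_add_key()/rte_hash_lookup()/
 * rte_hash_del_key() all return the same non-negative slot on success.
 * A sketch of that pairing (assumes <rte_hash.h>; 'h' is an already
 * initialized hash table):
 */
#if 0   /* illustrative sketch only */
static void *table_find(struct rte_hash *h, void **map, const void *key)
{
	int pos = rte_hash_lookup(h, key);

	return (pos < 0) ? NULL : map[pos];
}

static int table_del(struct rte_hash *h, void **map, const void *key)
{
	int pos = rte_hash_del_key(h, key);

	if (pos < 0)
		return pos;     /* not found or error */
	map[pos] = NULL;
	return 0;
}
#endif
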
7871 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7872 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7873 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7874 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7875 #define I40E_TR_GRE_KEY_MASK                    0x400
7876 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7877 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7878 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
7879 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
7880 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
7881 #define I40E_DIRECTION_INGRESS_KEY              0x8000
7882 #define I40E_TR_L4_TYPE_TCP                     0x2
7883 #define I40E_TR_L4_TYPE_UDP                     0x4
7884 #define I40E_TR_L4_TYPE_SCTP                    0x8
7885
7886 static enum i40e_status_code
7887 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7888 {
7889         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7890         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7891         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7892         struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
7893         enum i40e_status_code status = I40E_SUCCESS;
7894
7895         if (pf->support_multi_driver) {
7896                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7897                 return I40E_NOT_SUPPORTED;
7898         }
7899
7900         memset(&filter_replace, 0,
7901                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7902         memset(&filter_replace_buf, 0,
7903                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7904
7905         /* create L1 filter */
7906         filter_replace.old_filter_type =
7907                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7908         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7909         filter_replace.tr_bit = 0;
7910
7911         /* Prepare the buffer, 3 entries */
7912         filter_replace_buf.data[0] =
7913                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7914         filter_replace_buf.data[0] |=
7915                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7916         filter_replace_buf.data[2] = 0xFF;
7917         filter_replace_buf.data[3] = 0xFF;
7918         filter_replace_buf.data[4] =
7919                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7920         filter_replace_buf.data[4] |=
7921                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7922         filter_replace_buf.data[7] = 0xF0;
7923         filter_replace_buf.data[8]
7924                 = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7925         filter_replace_buf.data[8] |=
7926                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7927         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7928                 I40E_TR_GENEVE_KEY_MASK |
7929                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7930         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7931                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7932                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7933
7934         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7935                                                &filter_replace_buf);
7936         if (!status && (filter_replace.old_filter_type !=
7937                         filter_replace.new_filter_type))
7938                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
7939                             " original: 0x%x, new: 0x%x",
7940                             dev->device->name,
7941                             filter_replace.old_filter_type,
7942                             filter_replace.new_filter_type);
7943
7944         return status;
7945 }
7946
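/* Replace the cloud filter types used for MPLS tunnels: IIP is swapped
 * for custom type 0x11 (MPLSoUDP) and IMAC for custom type 0x12
 * (MPLSoGRE), each matching on the outer STAG plus the L1 filter
 * installed above.
 */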
7947 static enum i40e_status_code
7948 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7949 {
7950         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7951         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7952         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7953         struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
7954         enum i40e_status_code status = I40E_SUCCESS;
7955
7956         if (pf->support_multi_driver) {
7957                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7958                 return I40E_NOT_SUPPORTED;
7959         }
7960
7961         /* For MPLSoUDP */
7962         memset(&filter_replace, 0,
7963                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7964         memset(&filter_replace_buf, 0,
7965                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7966         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7967                 I40E_AQC_MIRROR_CLOUD_FILTER;
7968         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7969         filter_replace.new_filter_type =
7970                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7971         /* Prepare the buffer, 2 entries */
7972         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7973         filter_replace_buf.data[0] |=
7974                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7975         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7976         filter_replace_buf.data[4] |=
7977                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7978         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7979                                                &filter_replace_buf);
7980         if (status < 0)
7981                 return status;
7982         if (filter_replace.old_filter_type !=
7983             filter_replace.new_filter_type)
7984                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7985                             " original: 0x%x, new: 0x%x",
7986                             dev->device->name,
7987                             filter_replace.old_filter_type,
7988                             filter_replace.new_filter_type);
7989
7990         /* For MPLSoGRE */
7991         memset(&filter_replace, 0,
7992                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7993         memset(&filter_replace_buf, 0,
7994                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7995
7996         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7997                 I40E_AQC_MIRROR_CLOUD_FILTER;
7998         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7999         filter_replace.new_filter_type =
8000                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8001         /* Prepare the buffer, 2 entries */
8002         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8003         filter_replace_buf.data[0] |=
8004                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8005         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
8006         filter_replace_buf.data[4] |=
8007                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8008
8009         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8010                                                &filter_replace_buf);
8011         if (!status && (filter_replace.old_filter_type !=
8012                         filter_replace.new_filter_type))
8013                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8014                             " original: 0x%x, new: 0x%x",
8015                             dev->device->name,
8016                             filter_replace.old_filter_type,
8017                             filter_replace.new_filter_type);
8018
8019         return status;
8020 }
8021
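/* Replace the L1 filters used for GTP: IMAC is swapped for custom type
 * 0x12 (GTP-C) and the tunnel key for custom type 0x13 (GTP-U), both
 * matching the 32-bit TEID split across field-vector words 0 and 1.
 */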
8022 static enum i40e_status_code
8023 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
8024 {
8025         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8026         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8027         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8028         struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8029         enum i40e_status_code status = I40E_SUCCESS;
8030
8031         if (pf->support_multi_driver) {
8032                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8033                 return I40E_NOT_SUPPORTED;
8034         }
8035
8036         /* For GTP-C */
8037         memset(&filter_replace, 0,
8038                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8039         memset(&filter_replace_buf, 0,
8040                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8041         /* create L1 filter */
8042         filter_replace.old_filter_type =
8043                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8044         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
8045         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
8046                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8047         /* Prepare the buffer, 2 entries */
8048         filter_replace_buf.data[0] =
8049                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8050         filter_replace_buf.data[0] |=
8051                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8052         filter_replace_buf.data[2] = 0xFF;
8053         filter_replace_buf.data[3] = 0xFF;
8054         filter_replace_buf.data[4] =
8055                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8056         filter_replace_buf.data[4] |=
8057                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8058         filter_replace_buf.data[6] = 0xFF;
8059         filter_replace_buf.data[7] = 0xFF;
8060         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8061                                                &filter_replace_buf);
8062         if (status < 0)
8063                 return status;
8064         if (filter_replace.old_filter_type !=
8065             filter_replace.new_filter_type)
8066                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8067                             " original: 0x%x, new: 0x%x",
8068                             dev->device->name,
8069                             filter_replace.old_filter_type,
8070                             filter_replace.new_filter_type);
8071
8072         /* For GTP-U */
8073         memset(&filter_replace, 0,
8074                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8075         memset(&filter_replace_buf, 0,
8076                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8077         /* create L1 filter */
8078         filter_replace.old_filter_type =
8079                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8080         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
8081         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
8082                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8083         /* Prepare the buffer, 2 entries */
8084         filter_replace_buf.data[0] =
8085                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8086         filter_replace_buf.data[0] |=
8087                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8088         filter_replace_buf.data[2] = 0xFF;
8089         filter_replace_buf.data[3] = 0xFF;
8090         filter_replace_buf.data[4] =
8091                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8092         filter_replace_buf.data[4] |=
8093                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8094         filter_replace_buf.data[6] = 0xFF;
8095         filter_replace_buf.data[7] = 0xFF;
8096
8097         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8098                                                &filter_replace_buf);
8099         if (!status && (filter_replace.old_filter_type !=
8100                         filter_replace.new_filter_type))
8101                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8102                             " original: 0x%x, new: 0x%x",
8103                             dev->device->name,
8104                             filter_replace.old_filter_type,
8105                             filter_replace.new_filter_type);
8106
8107         return status;
8108 }
8109
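/* Replace the cloud filter types used for GTP: IMAC_IVLAN is swapped
 * for custom type 0x11 (GTP-C) and IMAC_IVLAN_TEN_ID for custom type
 * 0x12 (GTP-U), each pairing the matching L1 filter with the outer
 * STAG.
 */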
8110 static enum i40e_status_code
8111 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
8112 {
8113         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8114         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8115         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8116         struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8117         enum i40e_status_code status = I40E_SUCCESS;
8118
8119         if (pf->support_multi_driver) {
8120                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8121                 return I40E_NOT_SUPPORTED;
8122         }
8123
8124         /* For GTP-C */
8125         memset(&filter_replace, 0,
8126                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8127         memset(&filter_replace_buf, 0,
8128                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8129         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8130         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
8131         filter_replace.new_filter_type =
8132                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8133         /* Prepare the buffer, 2 entries */
8134         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
8135         filter_replace_buf.data[0] |=
8136                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8137         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8138         filter_replace_buf.data[4] |=
8139                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8140         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8141                                                &filter_replace_buf);
8142         if (status < 0)
8143                 return status;
8144         if (filter_replace.old_filter_type !=
8145             filter_replace.new_filter_type)
8146                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8147                             " original: 0x%x, new: 0x%x",
8148                             dev->device->name,
8149                             filter_replace.old_filter_type,
8150                             filter_replace.new_filter_type);
8151
8152         /* For GTP-U */
8153         memset(&filter_replace, 0,
8154                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8155         memset(&filter_replace_buf, 0,
8156                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8157         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8158         filter_replace.old_filter_type =
8159                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
8160         filter_replace.new_filter_type =
8161                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8162         /* Prepare the buffer, 2 entries */
8163         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
8164         filter_replace_buf.data[0] |=
8165                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8166         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8167         filter_replace_buf.data[4] |=
8168                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8169
8170         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8171                                                &filter_replace_buf);
8172         if (!status && (filter_replace.old_filter_type !=
8173                         filter_replace.new_filter_type))
8174                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8175                             " original: 0x%x, new: 0x%x",
8176                             dev->device->name,
8177                             filter_replace.old_filter_type,
8178                             filter_replace.new_filter_type);
8179
8180         return status;
8181 }
8182
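/* Replace an L1 filter so that ingress UDP/TCP/SCTP traffic can be
 * matched on its L4 source port (custom type 0x11) or destination
 * port (custom type 0x10).
 */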
8183 static enum i40e_status_code
8184 i40e_replace_port_l1_filter(struct i40e_pf *pf,
8185                             enum i40e_l4_port_type l4_port_type)
8186 {
8187         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8188         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8189         enum i40e_status_code status = I40E_SUCCESS;
8190         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8191         struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8192
8193         if (pf->support_multi_driver) {
8194                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8195                 return I40E_NOT_SUPPORTED;
8196         }
8197
8198         memset(&filter_replace, 0,
8199                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8200         memset(&filter_replace_buf, 0,
8201                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8202
8203         /* create L1 filter */
8204         if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8205                 filter_replace.old_filter_type =
8206                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8207                 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8208                 filter_replace_buf.data[8] =
8209                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
8210         } else {
8211                 filter_replace.old_filter_type =
8212                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
8213                 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
8214                 filter_replace_buf.data[8] =
8215                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
8216         }
8217
8218         filter_replace.tr_bit = 0;
8219         /* Prepare the buffer, 3 entries */
8220         filter_replace_buf.data[0] =
8221                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
8222         filter_replace_buf.data[0] |=
8223                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8224         filter_replace_buf.data[2] = 0x00;
8225         filter_replace_buf.data[3] =
8226                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
8227         filter_replace_buf.data[4] =
8228                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
8229         filter_replace_buf.data[4] |=
8230                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8231         filter_replace_buf.data[5] = 0x00;
8232         filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
8233                 I40E_TR_L4_TYPE_TCP |
8234                 I40E_TR_L4_TYPE_SCTP;
8235         filter_replace_buf.data[7] = 0x00;
8236         filter_replace_buf.data[8] |=
8237                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8238         filter_replace_buf.data[9] = 0x00;
8239         filter_replace_buf.data[10] = 0xFF;
8240         filter_replace_buf.data[11] = 0xFF;
8241
8242         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8243                                                &filter_replace_buf);
8244         if (!status && filter_replace.old_filter_type !=
8245             filter_replace.new_filter_type)
8246                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8247                             " original: 0x%x, new: 0x%x",
8248                             dev->device->name,
8249                             filter_replace.old_filter_type,
8250                             filter_replace.new_filter_type);
8251
8252         return status;
8253 }
8254
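/* Replace the matching cloud filter for L4 port rules: IIP is swapped
 * for custom type 0x11 (source port) and OIP for custom type 0x10
 * (destination port).
 */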
8255 static enum i40e_status_code
8256 i40e_replace_port_cloud_filter(struct i40e_pf *pf,
8257                                enum i40e_l4_port_type l4_port_type)
8258 {
8259         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8260         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8261         enum i40e_status_code status = I40E_SUCCESS;
8262         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8263         struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8264
8265         if (pf->support_multi_driver) {
8266                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8267                 return I40E_NOT_SUPPORTED;
8268         }
8269
8270         memset(&filter_replace, 0,
8271                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8272         memset(&filter_replace_buf, 0,
8273                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8274
8275         if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8276                 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8277                 filter_replace.new_filter_type =
8278                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8279                 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
8280         } else {
8281                 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
8282                 filter_replace.new_filter_type =
8283                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
8284                 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
8285         }
8286
8287         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8288         filter_replace.tr_bit = 0;
8289         /* Prepare the buffer, 2 entries */
8290         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8291         filter_replace_buf.data[0] |=
8292                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8293         filter_replace_buf.data[4] |=
8294                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8295         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8296                                                &filter_replace_buf);
8297
8298         if (!status && filter_replace.old_filter_type !=
8299             filter_replace.new_filter_type)
8300                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8301                             " original: 0x%x, new: 0x%x",
8302                             dev->device->name,
8303                             filter_replace.old_filter_type,
8304                             filter_replace.new_filter_type);
8305
8306         return status;
8307 }
8308
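/* Add (add != 0) or delete a tunnel filter, keeping the cloud filter in
 * hardware and the driver's SW tunnel list consistent. Tunnel types that
 * need custom parser resources (MPLS, GTP, QinQ and plain L4-port
 * filters) use the big-buffer command variant and trigger a one-time
 * filter replacement per PF.
 */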
8309 int
8310 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
8311                       struct i40e_tunnel_filter_conf *tunnel_filter,
8312                       uint8_t add)
8313 {
8314         uint16_t ip_type;
8315         uint32_t ipv4_addr, ipv4_addr_le;
8316         uint8_t i, tun_type = 0;
8317         /* Temporary buffer for IPv6 byte-order conversion */
8318         uint32_t convert_ipv6[4];
8319         int val, ret = 0;
8320         struct i40e_pf_vf *vf = NULL;
8321         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8322         struct i40e_vsi *vsi;
8323         struct i40e_aqc_cloud_filters_element_bb *cld_filter;
8324         struct i40e_aqc_cloud_filters_element_bb *pfilter;
8325         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
8326         struct i40e_tunnel_filter *tunnel, *node;
8327         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
8328         uint32_t teid_le;
8329         bool big_buffer = 0;
8330
8331         cld_filter = rte_zmalloc("tunnel_filter",
8332                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8333                          0);
8334
8335         if (cld_filter == NULL) {
8336                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8337                 return -ENOMEM;
8338         }
8339         pfilter = cld_filter;
8340
8341         rte_ether_addr_copy(&tunnel_filter->outer_mac,
8342                         (struct rte_ether_addr *)&pfilter->element.outer_mac);
8343         rte_ether_addr_copy(&tunnel_filter->inner_mac,
8344                         (struct rte_ether_addr *)&pfilter->element.inner_mac);
8345
8346         pfilter->element.inner_vlan =
8347                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8348         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
8349                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8350                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8351                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8352                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
8353                                 &ipv4_addr_le,
8354                                 sizeof(pfilter->element.ipaddr.v4.data));
8355         } else {
8356                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8357                 for (i = 0; i < 4; i++) {
8358                         convert_ipv6[i] =
8359                         rte_cpu_to_le_32(rte_be_to_cpu_32(
8360                                          tunnel_filter->ip_addr.ipv6_addr[i]));
8361                 }
8362                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
8363                            &convert_ipv6,
8364                            sizeof(pfilter->element.ipaddr.v6.data));
8365         }
8366
8367         /* Check the tunnel type */
8368         switch (tunnel_filter->tunnel_type) {
8369         case I40E_TUNNEL_TYPE_VXLAN:
8370                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8371                 break;
8372         case I40E_TUNNEL_TYPE_NVGRE:
8373                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8374                 break;
8375         case I40E_TUNNEL_TYPE_IP_IN_GRE:
8376                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8377                 break;
8378         case I40E_TUNNEL_TYPE_MPLSoUDP:
8379                 if (!pf->mpls_replace_flag) {
8380                         i40e_replace_mpls_l1_filter(pf);
8381                         i40e_replace_mpls_cloud_filter(pf);
8382                         pf->mpls_replace_flag = 1;
8383                 }
8384                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8385                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8386                         teid_le >> 4;
8387                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8388                         (teid_le & 0xF) << 12;
8389                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8390                         0x40;
8391                 big_buffer = 1;
8392                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
8393                 break;
8394         case I40E_TUNNEL_TYPE_MPLSoGRE:
8395                 if (!pf->mpls_replace_flag) {
8396                         i40e_replace_mpls_l1_filter(pf);
8397                         i40e_replace_mpls_cloud_filter(pf);
8398                         pf->mpls_replace_flag = 1;
8399                 }
8400                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8401                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8402                         teid_le >> 4;
8403                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8404                         (teid_le & 0xF) << 12;
8405                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8406                         0x0;
8407                 big_buffer = 1;
8408                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
8409                 break;
8410         case I40E_TUNNEL_TYPE_GTPC:
8411                 if (!pf->gtp_replace_flag) {
8412                         i40e_replace_gtp_l1_filter(pf);
8413                         i40e_replace_gtp_cloud_filter(pf);
8414                         pf->gtp_replace_flag = 1;
8415                 }
8416                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8417                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
8418                         (teid_le >> 16) & 0xFFFF;
8419                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
8420                         teid_le & 0xFFFF;
8421                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
8422                         0x0;
8423                 big_buffer = 1;
8424                 break;
8425         case I40E_TUNNEL_TYPE_GTPU:
8426                 if (!pf->gtp_replace_flag) {
8427                         i40e_replace_gtp_l1_filter(pf);
8428                         i40e_replace_gtp_cloud_filter(pf);
8429                         pf->gtp_replace_flag = 1;
8430                 }
8431                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8432                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
8433                         (teid_le >> 16) & 0xFFFF;
8434                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
8435                         teid_le & 0xFFFF;
8436                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
8437                         0x0;
8438                 big_buffer = 1;
8439                 break;
8440         case I40E_TUNNEL_TYPE_QINQ:
8441                 if (!pf->qinq_replace_flag) {
8442                         ret = i40e_cloud_filter_qinq_create(pf);
8443                         if (ret < 0)
8444                                 PMD_DRV_LOG(DEBUG,
8445                                             "QinQ tunnel filter already created.");
8446                         pf->qinq_replace_flag = 1;
8447                 }
8448                 /* Put the values of the outer and inner VLAN
8449                  * into the general fields.
8450                  * The big-buffer command variant must be used;
8451                  * see i40e_aq_add_cloud_filters_bb.
8452                  */
8453                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
8454                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
8455                 big_buffer = 1;
8456                 break;
8457         case I40E_CLOUD_TYPE_UDP:
8458         case I40E_CLOUD_TYPE_TCP:
8459         case I40E_CLOUD_TYPE_SCTP:
8460                 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8461                         if (!pf->sport_replace_flag) {
8462                                 i40e_replace_port_l1_filter(pf,
8463                                                 tunnel_filter->l4_port_type);
8464                                 i40e_replace_port_cloud_filter(pf,
8465                                                 tunnel_filter->l4_port_type);
8466                                 pf->sport_replace_flag = 1;
8467                         }
8468                         teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8469                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8470                                 I40E_DIRECTION_INGRESS_KEY;
8471
8472                         if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8473                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8474                                         I40E_TR_L4_TYPE_UDP;
8475                         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8476                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8477                                         I40E_TR_L4_TYPE_TCP;
8478                         else
8479                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8480                                         I40E_TR_L4_TYPE_SCTP;
8481
8482                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8483                                 (teid_le >> 16) & 0xFFFF;
8484                         big_buffer = 1;
8485                 } else {
8486                         if (!pf->dport_replace_flag) {
8487                                 i40e_replace_port_l1_filter(pf,
8488                                                 tunnel_filter->l4_port_type);
8489                                 i40e_replace_port_cloud_filter(pf,
8490                                                 tunnel_filter->l4_port_type);
8491                                 pf->dport_replace_flag = 1;
8492                         }
8493                         teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8494                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
8495                                 I40E_DIRECTION_INGRESS_KEY;
8496
8497                         if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8498                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8499                                         I40E_TR_L4_TYPE_UDP;
8500                         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8501                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8502                                         I40E_TR_L4_TYPE_TCP;
8503                         else
8504                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8505                                         I40E_TR_L4_TYPE_SCTP;
8506
8507                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
8508                                 (teid_le >> 16) & 0xFFFF;
8509                         big_buffer = 1;
8510                 }
8511
8512                 break;
8513         default:
8514                 /* Other tunnel types are not supported. */
8515                 PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
8516                 rte_free(cld_filter);
8517                 return -EINVAL;
8518         }
8519
8520         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
8521                 pfilter->element.flags =
8522                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8523         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
8524                 pfilter->element.flags =
8525                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
8526         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
8527                 pfilter->element.flags =
8528                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8529         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
8530                 pfilter->element.flags =
8531                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
8532         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
8533                 pfilter->element.flags |=
8534                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
8535         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
8536                  tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
8537                  tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
8538                 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
8539                         pfilter->element.flags |=
8540                                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8541                 else
8542                         pfilter->element.flags |=
8543                                 I40E_AQC_ADD_CLOUD_FILTER_0X10;
8544         } else {
8545                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8546                                                 &pfilter->element.flags);
8547                 if (val < 0) {
8548                         rte_free(cld_filter);
8549                         return -EINVAL;
8550                 }
8551         }
8552
8553         pfilter->element.flags |= rte_cpu_to_le_16(
8554                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8555                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8556         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8557         pfilter->element.queue_number =
8558                 rte_cpu_to_le_16(tunnel_filter->queue_id);
8559
8560         if (!tunnel_filter->is_to_vf)
8561                 vsi = pf->main_vsi;
8562         else {
8563                 if (tunnel_filter->vf_id >= pf->vf_num) {
8564                         PMD_DRV_LOG(ERR, "Invalid argument.");
8565                         rte_free(cld_filter);
8566                         return -EINVAL;
8567                 }
8568                 vf = &pf->vfs[tunnel_filter->vf_id];
8569                 vsi = vf->vsi;
8570         }
8571
8572         /* Check if the filter already exists in the SW list */
8573         memset(&check_filter, 0, sizeof(check_filter));
8574         i40e_tunnel_filter_convert(cld_filter, &check_filter);
8575         check_filter.is_to_vf = tunnel_filter->is_to_vf;
8576         check_filter.vf_id = tunnel_filter->vf_id;
8577         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8578         if (add && node) {
8579                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8580                 rte_free(cld_filter);
8581                 return -EINVAL;
8582         }
8583
8584         if (!add && !node) {
8585                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8586                 rte_free(cld_filter);
8587                 return -EINVAL;
8588         }
8589
8590         if (add) {
8591                 if (big_buffer)
8592                         ret = i40e_aq_add_cloud_filters_bb(hw,
8593                                                    vsi->seid, cld_filter, 1);
8594                 else
8595                         ret = i40e_aq_add_cloud_filters(hw,
8596                                         vsi->seid, &cld_filter->element, 1);
8597                 if (ret < 0) {
8598                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8599                         rte_free(cld_filter);
8600                         return -ENOTSUP;
8601                 }
8602                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8603                 if (tunnel == NULL) {
8604                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8605                         rte_free(cld_filter);
8606                         return -ENOMEM;
8607                 }
8608
8609                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8610                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8611                 if (ret < 0)
8612                         rte_free(tunnel);
8613         } else {
8614                 if (big_buffer)
8615                         ret = i40e_aq_rem_cloud_filters_bb(
8616                                 hw, vsi->seid, cld_filter, 1);
8617                 else
8618                         ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8619                                                 &cld_filter->element, 1);
8620                 if (ret < 0) {
8621                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8622                         rte_free(cld_filter);
8623                         return -ENOTSUP;
8624                 }
8625                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8626         }
8627
8628         rte_free(cld_filter);
8629         return ret;
8630 }
8631
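/* Return the table index of the given VXLAN UDP port, or -1 if it is
 * not present. A slot value of 0 marks a free entry, so looking up
 * port 0 yields the first unused slot.
 */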
8632 static int
8633 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8634 {
8635         uint8_t i;
8636
8637         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8638                 if (pf->vxlan_ports[i] == port)
8639                         return i;
8640         }
8641
8642         return -1;
8643 }
8644
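/* Offload a new VXLAN(-GPE) UDP port: reject duplicates, find a free
 * slot, program the port through the admin queue, then record it in
 * the local table and bitmap.
 */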
8645 static int
8646 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
8647 {
8648         int  idx, ret;
8649         uint8_t filter_idx = 0;
8650         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8651
8652         idx = i40e_get_vxlan_port_idx(pf, port);
8653
8654         /* Check if port already exists */
8655         if (idx >= 0) {
8656                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8657                 return -EINVAL;
8658         }
8659
8660         /* Now check if there is space to add the new port */
8661         idx = i40e_get_vxlan_port_idx(pf, 0);
8662         if (idx < 0) {
8663                 PMD_DRV_LOG(ERR,
8664                         "Maximum number of UDP ports reached, not adding port %d",
8665                         port);
8666                 return -ENOSPC;
8667         }
8668
8669         ret =  i40e_aq_add_udp_tunnel(hw, port, udp_type,
8670                                         &filter_idx, NULL);
8671         if (ret < 0) {
8672                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8673                 return -1;
8674         }
8675
8676         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
8677                          port,  filter_idx);
8678
8679         /* New port: add it and mark its index in the bitmap */
8680         pf->vxlan_ports[idx] = port;
8681         pf->vxlan_bitmap |= (1 << idx);
8682
8683         if (!(pf->flags & I40E_FLAG_VXLAN))
8684                 pf->flags |= I40E_FLAG_VXLAN;
8685
8686         return 0;
8687 }
8688
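/* Stop offloading a VXLAN UDP port and release its slot; the VXLAN
 * flag is cleared once no offloaded ports remain.
 */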
8689 static int
8690 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8691 {
8692         int idx;
8693         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8694
8695         if (!(pf->flags & I40E_FLAG_VXLAN)) {
8696                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8697                 return -EINVAL;
8698         }
8699
8700         idx = i40e_get_vxlan_port_idx(pf, port);
8701
8702         if (idx < 0) {
8703                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8704                 return -EINVAL;
8705         }
8706
8707         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8708                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8709                 return -1;
8710         }
8711
8712         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
8713                         port, idx);
8714
8715         pf->vxlan_ports[idx] = 0;
8716         pf->vxlan_bitmap &= ~(1 << idx);
8717
8718         if (!pf->vxlan_bitmap)
8719                 pf->flags &= ~I40E_FLAG_VXLAN;
8720
8721         return 0;
8722 }
8723
8724 /* Add UDP tunneling port */
8725 static int
8726 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8727                              struct rte_eth_udp_tunnel *udp_tunnel)
8728 {
8729         int ret = 0;
8730         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8731
8732         if (udp_tunnel == NULL)
8733                 return -EINVAL;
8734
8735         switch (udp_tunnel->prot_type) {
8736         case RTE_TUNNEL_TYPE_VXLAN:
8737                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8738                                           I40E_AQC_TUNNEL_TYPE_VXLAN);
8739                 break;
8740         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8741                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8742                                           I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
8743                 break;
8744         case RTE_TUNNEL_TYPE_GENEVE:
8745         case RTE_TUNNEL_TYPE_TEREDO:
8746                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8747                 ret = -1;
8748                 break;
8749
8750         default:
8751                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8752                 ret = -1;
8753                 break;
8754         }
8755
8756         return ret;
8757 }
8758
8759 /* Remove UDP tunneling port */
8760 static int
8761 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8762                              struct rte_eth_udp_tunnel *udp_tunnel)
8763 {
8764         int ret = 0;
8765         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8766
8767         if (udp_tunnel == NULL)
8768                 return -EINVAL;
8769
8770         switch (udp_tunnel->prot_type) {
8771         case RTE_TUNNEL_TYPE_VXLAN:
8772         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8773                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8774                 break;
8775         case RTE_TUNNEL_TYPE_GENEVE:
8776         case RTE_TUNNEL_TYPE_TEREDO:
8777                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8778                 ret = -1;
8779                 break;
8780         default:
8781                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8782                 ret = -1;
8783                 break;
8784         }
8785
8786         return ret;
8787 }
8788
8789 /* Calculate the maximum number of contiguous PF queues that are configured */
8790 int
8791 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8792 {
8793         struct rte_eth_dev_data *data = pf->dev_data;
8794         int i, num;
8795         struct i40e_rx_queue *rxq;
8796
8797         num = 0;
8798         for (i = 0; i < pf->lan_nb_qps; i++) {
8799                 rxq = data->rx_queues[i];
8800                 if (rxq && rxq->q_set)
8801                         num++;
8802                 else
8803                         break;
8804         }
8805
8806         return num;
8807 }
8808
8809 /* Reset the global configuration of the hash function and input sets */
8810 static void
8811 i40e_pf_global_rss_reset(struct i40e_pf *pf)
8812 {
8813         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8814         uint32_t reg, reg_val;
8815         int i;
8816
8817         /* Reset global RSS function sets */
8818         reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8819         if (!(reg_val & I40E_GLQF_CTL_HTOEP_MASK)) {
8820                 reg_val |= I40E_GLQF_CTL_HTOEP_MASK;
8821                 i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg_val);
8822         }
8823
8824         for (i = 0; i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) {
8825                 uint64_t inset;
8826                 int j, pctype;
8827
8828                 if (hw->mac.type == I40E_MAC_X722)
8829                         pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(i));
8830                 else
8831                         pctype = i;
8832
8833                 /* Reset pctype insets */
8834                 inset = i40e_get_default_input_set(i);
8835                 if (inset) {
8836                         pf->hash_input_set[pctype] = inset;
8837                         inset = i40e_translate_input_set_reg(hw->mac.type,
8838                                                              inset);
8839
8840                         reg = I40E_GLQF_HASH_INSET(0, pctype);
8841                         i40e_check_write_global_reg(hw, reg, (uint32_t)inset);
8842                         reg = I40E_GLQF_HASH_INSET(1, pctype);
8843                         i40e_check_write_global_reg(hw, reg,
8844                                                     (uint32_t)(inset >> 32));
8845
8846                         /* Clear unused mask registers of the pctype */
8847                         for (j = 0; j < I40E_INSET_MASK_NUM_REG; j++) {
8848                                 reg = I40E_GLQF_HASH_MSK(j, pctype);
8849                                 i40e_check_write_global_reg(hw, reg, 0);
8850                         }
8851                 }
8852
8853                 /* Reset pctype symmetric sets */
8854                 reg = I40E_GLQF_HSYM(pctype);
8855                 reg_val = i40e_read_rx_ctl(hw, reg);
8856                 if (reg_val & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8857                         reg_val &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
8858                         i40e_write_global_rx_ctl(hw, reg, reg_val);
8859                 }
8860         }
8861         I40E_WRITE_FLUSH(hw);
8862 }
8863
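/* Program a default redirection table that spreads traffic round-robin
 * across the configured RX queues, capped at the per-TC queue limit.
 */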
8864 int
8865 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
8866 {
8867         struct i40e_hw *hw = &pf->adapter->hw;
8868         uint8_t lut[ETH_RSS_RETA_SIZE_512];
8869         uint32_t i;
8870         int num;
8871
8872         /* If both VMDQ and RSS are enabled, not all PF queues are
8873          * configured. It's necessary to calculate the number of PF
8874          * queues that are actually configured.
8875          */
8876         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8877                 num = i40e_pf_calc_configured_queues_num(pf);
8878         else
8879                 num = pf->dev_data->nb_rx_queues;
8880
8881         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8882         if (num <= 0)
8883                 return 0;
8884
8885         for (i = 0; i < hw->func_caps.rss_table_size; i++)
8886                 lut[i] = (uint8_t)(i % (uint32_t)num);
8887
8888         return i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
8889 }
8890
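/* Restore the RSS hash key: use the application-provided key when one
 * of sufficient length is configured, otherwise fall back to the
 * driver's built-in default key.
 */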
8891 int
8892 i40e_pf_reset_rss_key(struct i40e_pf *pf)
8893 {
8894         const uint8_t key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8895                         sizeof(uint32_t);
8896         uint8_t *rss_key;
8897
8898         /* Reset key */
8899         rss_key = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key;
8900         if (!rss_key ||
8901             pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key_len < key_len) {
8902                 static uint32_t rss_key_default[] = {0x6b793944,
8903                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8904                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8905                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8906
8907                 rss_key = (uint8_t *)rss_key_default;
8908         }
8909
8910         return i40e_set_rss_key(pf->main_vsi, rss_key, key_len);
8911 }
8912
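/* Return RSS to its default state: disable hashing, clear the per-port
 * (and, unless multi-driver is set, global) symmetric hash and input-set
 * configuration, then restore the default RETA (unless the application
 * has updated it) and the default hash key.
 */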
8913 static int
8914 i40e_pf_rss_reset(struct i40e_pf *pf)
8915 {
8916         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8917
8918         int ret;
8919
8920         pf->hash_filter_enabled = 0;
8921         i40e_pf_disable_rss(pf);
8922         i40e_set_symmetric_hash_enable_per_port(hw, 0);
8923
8924         if (!pf->support_multi_driver)
8925                 i40e_pf_global_rss_reset(pf);
8926
8927         /* Reset RETA table */
8928         if (pf->adapter->rss_reta_updated == 0) {
8929                 ret = i40e_pf_reset_rss_reta(pf);
8930                 if (ret)
8931                         return ret;
8932         }
8933
8934         return i40e_pf_reset_rss_key(pf);
8935 }
8936
8937 /* Configure RSS */
8938 int
8939 i40e_pf_config_rss(struct i40e_pf *pf)
8940 {
8941         struct i40e_hw *hw;
8942         enum rte_eth_rx_mq_mode mq_mode;
8943         uint64_t rss_hf, hena;
8944         int ret;
8945
8946         ret = i40e_pf_rss_reset(pf);
8947         if (ret) {
8948                 PMD_DRV_LOG(ERR, "Reset RSS failed, RSS has been disabled");
8949                 return ret;
8950         }
8951
8952         rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
8953         mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8954         if (!(rss_hf & pf->adapter->flow_types_mask) ||
8955             !(mq_mode & ETH_MQ_RX_RSS_FLAG))
8956                 return 0;
8957
8958         hw = I40E_PF_TO_HW(pf);
8959         hena = i40e_config_hena(pf->adapter, rss_hf);
8960         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
8961         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
8962         I40E_WRITE_FLUSH(hw);
8963
8964         return 0;
8965 }
8966
8967 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8968 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
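/* Configure the GRE key length (3 or 4 bytes) that the hardware parser
 * extracts, via the global GL_PRS_FVBM(2) register. Since the register
 * is device-global, the change is refused when multiple drivers share
 * the NIC.
 */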
8969 int
8970 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8971 {
8972         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8973         uint32_t val, reg;
8974         int ret = -EINVAL;
8975
8976         if (pf->support_multi_driver) {
8977                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8978                 return -ENOTSUP;
8979         }
8980
8981         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8982         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8983
8984         if (len == 3) {
8985                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8986         } else if (len == 4) {
8987                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8988         } else {
8989                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8990                 return ret;
8991         }
8992
8993         if (reg != val) {
8994                 ret = i40e_aq_debug_write_global_register(hw,
8995                                                    I40E_GL_PRS_FVBM(2),
8996                                                    reg, NULL);
8997                 if (ret != 0)
8998                         return ret;
8999                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x changed "
9000                             "to value 0x%08x",
9001                             I40E_GL_PRS_FVBM(2), reg);
9002         } else {
9003                 ret = 0;
9004         }
9005         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
9006                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
9007
9008         return ret;
9009 }
9010
9011 /* Set the symmetric hash enable configuration per port */
9012 void
9013 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
9014 {
9015         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9016
9017         if (enable > 0) {
9018                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)
9019                         return;
9020
9021                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9022         } else {
9023                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK))
9024                         return;
9025
9026                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9027         }
9028         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
9029         I40E_WRITE_FLUSH(hw);
9030 }
9031
9032 /**
9033  * Valid input sets for hash and flow director filters per PCTYPE
9034  */
9035 static uint64_t
9036 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
9037                 enum rte_filter_type filter)
9038 {
9039         uint64_t valid;
9040
9041         static const uint64_t valid_hash_inset_table[] = {
9042                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9043                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9044                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9045                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
9046                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
9047                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9048                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9049                         I40E_INSET_FLEX_PAYLOAD,
9050                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9051                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9052                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9053                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9054                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9055                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9056                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9057                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9058                         I40E_INSET_FLEX_PAYLOAD,
9059                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9060                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9061                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9062                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9063                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9064                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9065                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9066                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9067                         I40E_INSET_FLEX_PAYLOAD,
9068                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9069                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9070                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9071                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9072                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9073                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9074                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9075                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9076                         I40E_INSET_FLEX_PAYLOAD,
9077                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9078                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9079                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9080                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9081                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9082                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9083                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9084                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9085                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9086                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9087                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9088                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9089                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9090                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9091                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9092                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9093                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9094                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9095                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9096                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9097                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9098                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9099                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9100                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9101                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9102                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9103                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9104                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9105                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9106                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9107                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9108                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9109                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9110                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9111                         I40E_INSET_FLEX_PAYLOAD,
9112                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9113                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9114                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9115                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9116                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9117                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9118                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9119                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9120                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9121                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9122                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9123                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9124                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9125                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9126                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9127                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9128                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9129                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9130                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9131                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9132                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9133                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9134                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9135                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9136                         I40E_INSET_FLEX_PAYLOAD,
9137                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9138                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9139                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9140                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9141                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9142                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9143                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9144                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9145                         I40E_INSET_FLEX_PAYLOAD,
9146                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9147                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9148                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9149                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9150                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9151                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9152                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9153                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9154                         I40E_INSET_FLEX_PAYLOAD,
9155                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9156                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9157                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9158                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9159                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9160                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9161                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9162                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9163                         I40E_INSET_FLEX_PAYLOAD,
9164                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9165                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9166                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9167                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9168                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9169                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9170                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9171                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9172                         I40E_INSET_FLEX_PAYLOAD,
9173                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9174                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9175                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9176                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9177                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9178                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9179                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9180                         I40E_INSET_FLEX_PAYLOAD,
9181                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9182                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9183                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9184                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9185                         I40E_INSET_FLEX_PAYLOAD,
9186         };
9187
9188         /**
9189          * Flow director supports only fields defined in
9190          * union rte_eth_fdir_flow.
9191          */
9192         static const uint64_t valid_fdir_inset_table[] = {
9193                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9194                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9195                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9196                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9197                 I40E_INSET_IPV4_TTL,
9198                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9199                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9200                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9201                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9202                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9203                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9204                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9205                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9206                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9207                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9208                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9209                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9210                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9211                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9212                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9213                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9214                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9215                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9216                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9217                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9218                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9219                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9220                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9221                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9222                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9223                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9224                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9225                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9226                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9227                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9228                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9229                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9230                 I40E_INSET_SCTP_VT,
9231                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9232                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9233                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9234                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9235                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9236                 I40E_INSET_IPV4_TTL,
9237                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9238                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9239                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9240                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9241                 I40E_INSET_IPV6_HOP_LIMIT,
9242                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9243                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9244                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9245                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9246                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9247                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9248                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9249                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9250                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9251                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9252                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9253                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9254                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9255                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9256                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9257                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9258                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9259                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9260                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9261                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9262                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9263                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9264                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9265                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9266                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9267                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9268                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9269                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9270                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9271                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9272                 I40E_INSET_SCTP_VT,
9273                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9274                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9275                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9276                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9277                 I40E_INSET_IPV6_HOP_LIMIT,
9278                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9279                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9280                 I40E_INSET_LAST_ETHER_TYPE,
9281         };
9282
9283         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9284                 return 0;
9285         if (filter == RTE_ETH_FILTER_HASH)
9286                 valid = valid_hash_inset_table[pctype];
9287         else
9288                 valid = valid_fdir_inset_table[pctype];
9289
9290         return valid;
9291 }
9292
9293 /**
9294  * Validate if the input set is allowed for a specific PCTYPE
9295  */
9296 int
9297 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9298                 enum rte_filter_type filter, uint64_t inset)
9299 {
9300         uint64_t valid;
9301
9302         valid = i40e_get_valid_input_set(pctype, filter);
9303         if (inset & (~valid))
9304                 return -EINVAL;
9305
9306         return 0;
9307 }
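
/*
 * Illustrative sketch, not part of the driver proper: validating a
 * candidate flow-director input set before programming it. The
 * RTE_ETH_FILTER_FDIR selector is assumed to be available alongside
 * the RTE_ETH_FILTER_HASH value used above.
 */
static int __rte_unused
i40e_example_validate_ipv4_udp_inset(void)
{
	uint64_t inset = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
			 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT;

	/* Returns 0 if every requested field is valid for the PCTYPE */
	return i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
				       RTE_ETH_FILTER_FDIR, inset);
}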
9308
9309 /* Default input set field combination per pctype */
9310 uint64_t
9311 i40e_get_default_input_set(uint16_t pctype)
9312 {
9313         static const uint64_t default_inset_table[] = {
9314                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9315                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9316                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9317                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9318                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9319                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9320                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9321                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9322                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9323                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9324                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9325                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9326                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9327                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9328                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9329                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9330                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9331                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9332                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9333                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9334                         I40E_INSET_SCTP_VT,
9335                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9336                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9337                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9338                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9339                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9340                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9341                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9342                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9343                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9344                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9345                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9346                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9347                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9348                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9349                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9350                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9351                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9352                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9353                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9354                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9355                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9356                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9357                         I40E_INSET_SCTP_VT,
9358                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9359                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9360                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9361                         I40E_INSET_LAST_ETHER_TYPE,
9362         };
9363
9364         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9365                 return 0;
9366
9367         return default_inset_table[pctype];
9368 }
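
/*
 * Illustrative check (not part of the driver): per the table above,
 * the default for the IPv4/UDP PCTYPE is the classic 4-tuple of
 * source/destination address plus source/destination port.
 */
static int __rte_unused
i40e_example_ipv4_udp_default_is_4tuple(void)
{
	uint64_t def = i40e_get_default_input_set(
				I40E_FILTER_PCTYPE_NONF_IPV4_UDP);

	return def == (I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
		       I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT);
}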
9369
9370 /**
9371  * Translate the input set from bit masks to register aware bit masks
9372  * and vice versa
9373  */
9374 uint64_t
9375 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9376 {
9377         uint64_t val = 0;
9378         uint16_t i;
9379
9380         struct inset_map {
9381                 uint64_t inset;
9382                 uint64_t inset_reg;
9383         };
9384
9385         static const struct inset_map inset_map_common[] = {
9386                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9387                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9388                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9389                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9390                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9391                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9392                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9393                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9394                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9395                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9396                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9397                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9398                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9399                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9400                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9401                 {I40E_INSET_TUNNEL_DMAC,
9402                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9403                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9404                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9405                 {I40E_INSET_TUNNEL_SRC_PORT,
9406                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9407                 {I40E_INSET_TUNNEL_DST_PORT,
9408                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9409                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9410                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9411                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9412                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9413                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9414                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9415                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9416                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9417                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9418         };
9419
9420         /* Some registers map differently on X722 */
9421         static const struct inset_map inset_map_diff_x722[] = {
9422                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9423                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9424                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9425                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9426         };
9427
9428         static const struct inset_map inset_map_diff_not_x722[] = {
9429                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9430                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9431                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9432                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9433         };
9434
9435         if (input == 0)
9436                 return val;
9437
9438         /* Translate input set to register aware inset */
9439         if (type == I40E_MAC_X722) {
9440                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9441                         if (input & inset_map_diff_x722[i].inset)
9442                                 val |= inset_map_diff_x722[i].inset_reg;
9443                 }
9444         } else {
9445                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9446                         if (input & inset_map_diff_not_x722[i].inset)
9447                                 val |= inset_map_diff_not_x722[i].inset_reg;
9448                 }
9449         }
9450
9451         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9452                 if (input & inset_map_common[i].inset)
9453                         val |= inset_map_common[i].inset_reg;
9454         }
9455
9456         return val;
9457 }
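
/*
 * Minimal sketch of how the callers below use the translation: the MAC
 * type must be passed because the four IPv4 L3 fields live in different
 * register bits on X722 than on the other parts.
 */
static uint64_t __rte_unused
i40e_example_translate_l2_inset(struct i40e_hw *hw)
{
	/* Abstract L2 input set -> register-aware representation */
	return i40e_translate_input_set_reg(hw->mac.type,
					    I40E_INSET_DMAC | I40E_INSET_SMAC);
}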
9458
9459 static int
9460 i40e_get_inset_field_offset(struct i40e_hw *hw, uint32_t pit_reg_start,
9461                             uint32_t pit_reg_count, uint32_t hdr_off)
9462 {
9463         const uint32_t pit_reg_end = pit_reg_start + pit_reg_count;
9464         uint32_t field_off = I40E_FDIR_FIELD_OFFSET(hdr_off);
9465         uint32_t i, reg_val, src_off, count;
9466
9467         for (i = pit_reg_start; i < pit_reg_end; i++) {
9468                 reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_PIT(i));
9469
9470                 src_off = I40E_GLQF_PIT_SOURCE_OFF_GET(reg_val);
9471                 count = I40E_GLQF_PIT_FSIZE_GET(reg_val);
9472
9473                 if (src_off <= field_off && (src_off + count) > field_off)
9474                         break;
9475         }
9476
9477         if (i >= pit_reg_end) {
9478                 PMD_DRV_LOG(ERR,
9479                             "Hardware GLQF_PIT configuration does not support this field mask");
9480                 return -1;
9481         }
9482
9483         return I40E_GLQF_PIT_DEST_OFF_GET(reg_val) + field_off - src_off;
9484 }
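
/*
 * Worked example with hypothetical register contents: if GLQF_PIT(i)
 * decodes to src_off = 8, count = 4 and a destination offset of 48,
 * then a field at field_off = 9 falls inside [8, 8 + 4) and the
 * function above returns 48 + (9 - 8) = 49.
 */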
9485
9486 int
9487 i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset,
9488                              uint32_t *mask, uint8_t nb_elem)
9489 {
9490         static const uint64_t mask_inset[] = {
9491                 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL,
9492                 I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT };
9493
9494         static const struct {
9495                 uint64_t inset;
9496                 uint32_t mask;
9497                 uint32_t offset;
9498         } inset_mask_offset_map[] = {
9499                 { I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK,
9500                   offsetof(struct rte_ipv4_hdr, type_of_service) },
9501
9502                 { I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK,
9503                   offsetof(struct rte_ipv4_hdr, next_proto_id) },
9504
9505                 { I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK,
9506                   offsetof(struct rte_ipv4_hdr, time_to_live) },
9507
9508                 { I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK,
9509                   offsetof(struct rte_ipv6_hdr, vtc_flow) },
9510
9511                 { I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK,
9512                   offsetof(struct rte_ipv6_hdr, proto) },
9513
9514                 { I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK,
9515                   offsetof(struct rte_ipv6_hdr, hop_limits) },
9516         };
9517
9518         uint32_t i;
9519         int idx = 0;
9520
9521         assert(mask);
9522         if (!inset)
9523                 return 0;
9524
9525         for (i = 0; i < RTE_DIM(mask_inset); i++) {
9526                 /* Clear the inset bit if no mask is required,
9527                  * for example proto + ttl
9528                  */
9529                 if ((mask_inset[i] & inset) == mask_inset[i]) {
9530                         inset &= ~mask_inset[i];
9531                         if (!inset)
9532                                 return 0;
9533                 }
9534         }
9535
9536         for (i = 0; i < RTE_DIM(inset_mask_offset_map); i++) {
9537                 uint32_t pit_start, pit_count;
9538                 int offset;
9539
9540                 if (!(inset_mask_offset_map[i].inset & inset))
9541                         continue;
9542
9543                 if (inset_mask_offset_map[i].inset &
9544                     (I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9545                      I40E_INSET_IPV4_TTL)) {
9546                         pit_start = I40E_GLQF_PIT_IPV4_START;
9547                         pit_count = I40E_GLQF_PIT_IPV4_COUNT;
9548                 } else {
9549                         pit_start = I40E_GLQF_PIT_IPV6_START;
9550                         pit_count = I40E_GLQF_PIT_IPV6_COUNT;
9551                 }
9552
9553                 offset = i40e_get_inset_field_offset(hw, pit_start, pit_count,
9554                                 inset_mask_offset_map[i].offset);
9555
9556                 if (offset < 0)
9557                         return -EINVAL;
9558
9559                 if (idx >= nb_elem) {
9560                         PMD_DRV_LOG(ERR,
9561                                     "Configuration of inset mask out of range %u",
9562                                     nb_elem);
9563                         return -ERANGE;
9564                 }
9565
9566                 mask[idx] = I40E_GLQF_PIT_BUILD((uint32_t)offset,
9567                                                 inset_mask_offset_map[i].mask);
9568                 idx++;
9569         }
9570
9571         return idx;
9572 }
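
/*
 * Usage sketch (mirrors the callers below): generate the GLQF mask
 * words needed when only part of a field, here the IPv4 TOS byte,
 * participates in the input set.
 */
static int __rte_unused
i40e_example_build_tos_mask(struct i40e_hw *hw)
{
	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};

	/* Returns the number of mask words filled, or a negative errno */
	return i40e_generate_inset_mask_reg(hw, I40E_INSET_IPV4_TOS,
					    mask_reg,
					    I40E_INSET_MASK_NUM_REG);
}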
9573
9574 void
9575 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9576 {
9577         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9578
9579         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9580         if (reg != val)
9581                 i40e_write_rx_ctl(hw, addr, val);
9582         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9583                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9584 }
9585
9586 void
9587 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9588 {
9589         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9590         struct rte_eth_dev_data *dev_data =
9591                 ((struct i40e_adapter *)hw->back)->pf.dev_data;
9592         struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
9593
9594         if (reg != val) {
9595                 i40e_write_rx_ctl(hw, addr, val);
9596                 PMD_DRV_LOG(WARNING,
9597                             "i40e device %s changed global register [0x%08x]."
9598                             " original: 0x%08x, new: 0x%08x",
9599                             dev->device->name, addr, reg,
9600                             (uint32_t)i40e_read_rx_ctl(hw, addr));
9601         }
9602 }
9603
9604 static void
9605 i40e_filter_input_set_init(struct i40e_pf *pf)
9606 {
9607         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9608         enum i40e_filter_pctype pctype;
9609         uint64_t input_set, inset_reg;
9610         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9611         int num, i;
9612         uint16_t flow_type;
9613
9614         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9615              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9616                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9617
9618                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9619                         continue;
9620
9621                 input_set = i40e_get_default_input_set(pctype);
9622
9623                 num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9624                                                    I40E_INSET_MASK_NUM_REG);
9625                 if (num < 0)
9626                         return;
9627                 if (pf->support_multi_driver && num > 0) {
9628                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9629                         return;
9630                 }
9631                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9632                                         input_set);
9633
9634                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9635                                       (uint32_t)(inset_reg & UINT32_MAX));
9636                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9637                                      (uint32_t)((inset_reg >>
9638                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
9639                 if (!pf->support_multi_driver) {
9640                         i40e_check_write_global_reg(hw,
9641                                             I40E_GLQF_HASH_INSET(0, pctype),
9642                                             (uint32_t)(inset_reg & UINT32_MAX));
9643                         i40e_check_write_global_reg(hw,
9644                                              I40E_GLQF_HASH_INSET(1, pctype),
9645                                              (uint32_t)((inset_reg >>
9646                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
9647
9648                         for (i = 0; i < num; i++) {
9649                                 i40e_check_write_global_reg(hw,
9650                                                     I40E_GLQF_FD_MSK(i, pctype),
9651                                                     mask_reg[i]);
9652                                 i40e_check_write_global_reg(hw,
9653                                                   I40E_GLQF_HASH_MSK(i, pctype),
9654                                                   mask_reg[i]);
9655                         }
9656                         /* Clear unused mask registers of the pctype */
9657                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9658                                 i40e_check_write_global_reg(hw,
9659                                                     I40E_GLQF_FD_MSK(i, pctype),
9660                                                     0);
9661                                 i40e_check_write_global_reg(hw,
9662                                                   I40E_GLQF_HASH_MSK(i, pctype),
9663                                                   0);
9664                         }
9665                 } else {
9666                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9667                 }
9668                 I40E_WRITE_FLUSH(hw);
9669
9670                 /* store the default input set */
9671                 if (!pf->support_multi_driver)
9672                         pf->hash_input_set[pctype] = input_set;
9673                 pf->fdir.input_set[pctype] = input_set;
9674         }
9675 }
9676
9677 int
9678 i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
9679                     uint32_t pctype, bool add)
9680 {
9681         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9682         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9683         uint64_t inset_reg = 0;
9684         int num, i;
9685
9686         if (pf->support_multi_driver) {
9687                 PMD_DRV_LOG(ERR,
9688                             "Modify input set is not permitted when multi-driver enabled.");
9689                 return -EPERM;
9690         }
9691
9692         /* For X722, get translated pctype in fd pctype register */
9693         if (hw->mac.type == I40E_MAC_X722)
9694                 pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
9695
9696         if (add) {
9697                 /* get inset value in register */
9698                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9699                 inset_reg <<= I40E_32_BIT_WIDTH;
9700                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9701                 input_set |= pf->hash_input_set[pctype];
9702         }
9703         num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9704                                            I40E_INSET_MASK_NUM_REG);
9705         if (num < 0)
9706                 return -EINVAL;
9707
9708         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9709
9710         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9711                                     (uint32_t)(inset_reg & UINT32_MAX));
9712         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9713                                     (uint32_t)((inset_reg >>
9714                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
9715
9716         for (i = 0; i < num; i++)
9717                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9718                                             mask_reg[i]);
9719         /* Clear unused mask registers of the pctype */
9720         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9721                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9722                                             0);
9723         I40E_WRITE_FLUSH(hw);
9724
9725         pf->hash_input_set[pctype] = input_set;
9726         return 0;
9727 }
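
/*
 * Caller sketch: extend the RSS hash input set of the IPv4/UDP PCTYPE
 * with the IPv4 TOS byte. With add == true the function ORs the new
 * fields into whatever is already programmed in the hardware.
 */
static int __rte_unused
i40e_example_hash_on_tos(struct i40e_hw *hw)
{
	return i40e_set_hash_inset(hw, I40E_INSET_IPV4_TOS,
				   I40E_FILTER_PCTYPE_NONF_IPV4_UDP, true);
}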
9728
9729 /* Convert ethertype filter structure */
9730 static int
9731 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9732                               struct i40e_ethertype_filter *filter)
9733 {
9734         rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
9735                 RTE_ETHER_ADDR_LEN);
9736         filter->input.ether_type = input->ether_type;
9737         filter->flags = input->flags;
9738         filter->queue = input->queue;
9739
9740         return 0;
9741 }
9742
9743 /* Check if the ethertype filter already exists */
9744 struct i40e_ethertype_filter *
9745 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9746                                 const struct i40e_ethertype_filter_input *input)
9747 {
9748         int ret;
9749
9750         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9751         if (ret < 0)
9752                 return NULL;
9753
9754         return ethertype_rule->hash_map[ret];
9755 }
9756
9757 /* Add ethertype filter in SW list */
9758 static int
9759 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9760                                 struct i40e_ethertype_filter *filter)
9761 {
9762         struct i40e_ethertype_rule *rule = &pf->ethertype;
9763         int ret;
9764
9765         ret = rte_hash_add_key(rule->hash_table, &filter->input);
9766         if (ret < 0) {
9767                 PMD_DRV_LOG(ERR,
9768                             "Failed to insert ethertype filter"
9769                             " into hash table %d!",
9770                             ret);
9771                 return ret;
9772         }
9773         rule->hash_map[ret] = filter;
9774
9775         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9776
9777         return 0;
9778 }
9779
9780 /* Delete ethertype filter in SW list */
9781 int
9782 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9783                              struct i40e_ethertype_filter_input *input)
9784 {
9785         struct i40e_ethertype_rule *rule = &pf->ethertype;
9786         struct i40e_ethertype_filter *filter;
9787         int ret;
9788
9789         ret = rte_hash_del_key(rule->hash_table, input);
9790         if (ret < 0) {
9791                 PMD_DRV_LOG(ERR,
9792                             "Failed to delete ethertype filter"
9793                             " from hash table %d!",
9794                             ret);
9795                 return ret;
9796         }
9797         filter = rule->hash_map[ret];
9798         rule->hash_map[ret] = NULL;
9799
9800         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9801         rte_free(filter);
9802
9803         return 0;
9804 }
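
/*
 * Pattern note: rte_hash_add_key() and rte_hash_del_key() return the
 * slot index of the key on success, and the code above reuses that
 * index into rule->hash_map, so a subsequent key lookup recovers the
 * filter object without walking the TAILQ.
 */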
9805
9806 /*
9807  * Configure an ethertype filter, which can direct packets by filtering
9808  * on MAC address plus ether_type, or on ether_type alone
9809  */
9810 int
9811 i40e_ethertype_filter_set(struct i40e_pf *pf,
9812                         struct rte_eth_ethertype_filter *filter,
9813                         bool add)
9814 {
9815         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9816         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9817         struct i40e_ethertype_filter *ethertype_filter, *node;
9818         struct i40e_ethertype_filter check_filter;
9819         struct i40e_control_filter_stats stats;
9820         uint16_t flags = 0;
9821         int ret;
9822
9823         if (filter->queue >= pf->dev_data->nb_rx_queues) {
9824                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9825                 return -EINVAL;
9826         }
9827         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
9828                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
9829                 PMD_DRV_LOG(ERR,
9830                         "unsupported ether_type(0x%04x) in control packet filter.",
9831                         filter->ether_type);
9832                 return -EINVAL;
9833         }
9834         if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
9835                 PMD_DRV_LOG(WARNING,
9836                         "filtering on vlan ether_type in the first tag is not supported.");
9837
9838         /* Check if the filter is already in the SW list */
9839         memset(&check_filter, 0, sizeof(check_filter));
9840         i40e_ethertype_filter_convert(filter, &check_filter);
9841         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9842                                                &check_filter.input);
9843         if (add && node) {
9844                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9845                 return -EINVAL;
9846         }
9847
9848         if (!add && !node) {
9849                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9850                 return -EINVAL;
9851         }
9852
9853         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9854                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9855         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9856                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9857         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9858
9859         memset(&stats, 0, sizeof(stats));
9860         ret = i40e_aq_add_rem_control_packet_filter(hw,
9861                         filter->mac_addr.addr_bytes,
9862                         filter->ether_type, flags,
9863                         pf->main_vsi->seid,
9864                         filter->queue, add, &stats, NULL);
9865
9866         PMD_DRV_LOG(INFO,
9867                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9868                 ret, stats.mac_etype_used, stats.etype_used,
9869                 stats.mac_etype_free, stats.etype_free);
9870         if (ret < 0)
9871                 return -ENOSYS;
9872
9873         /* Add or delete a filter in SW list */
9874         if (add) {
9875                 ethertype_filter = rte_zmalloc("ethertype_filter",
9876                                        sizeof(*ethertype_filter), 0);
9877                 if (ethertype_filter == NULL) {
9878                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9879                         return -ENOMEM;
9880                 }
9881
9882                 rte_memcpy(ethertype_filter, &check_filter,
9883                            sizeof(check_filter));
9884                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9885                 if (ret < 0)
9886                         rte_free(ethertype_filter);
9887         } else {
9888                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9889         }
9890
9891         return ret;
9892 }
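
/*
 * Illustrative caller sketch (queue number and use case are
 * hypothetical): steer all PTP (ether_type 0x88F7) frames to RX
 * queue 1. RTE_ETHTYPE_FLAGS_MAC is left clear, so the MAC address
 * is ignored by the hardware filter.
 */
static int __rte_unused
i40e_example_steer_ptp(struct i40e_pf *pf)
{
	struct rte_eth_ethertype_filter f = {
		.ether_type = 0x88F7, /* IEEE 1588 over Ethernet */
		.flags = 0,
		.queue = 1, /* must be below nb_rx_queues */
	};

	return i40e_ethertype_filter_set(pf, &f, true);
}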
9893
9894 static int
9895 i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
9896                       const struct rte_flow_ops **ops)
9897 {
9898         if (dev == NULL)
9899                 return -EINVAL;
9900
9901         *ops = &i40e_flow_ops;
9902         return 0;
9903 }
9904
9905 /*
9906  * Check and enable Extended Tag.
9907  * Enabling Extended Tag is important for 40G performance.
9908  */
9909 static void
9910 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9911 {
9912         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9913         uint32_t buf = 0;
9914         int ret;
9915
9916         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9917                                       PCI_DEV_CAP_REG);
9918         if (ret < 0) {
9919                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9920                             PCI_DEV_CAP_REG);
9921                 return;
9922         }
9923         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9924                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9925                 return;
9926         }
9927
9928         buf = 0;
9929         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9930                                       PCI_DEV_CTRL_REG);
9931         if (ret < 0) {
9932                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9933                             PCI_DEV_CTRL_REG);
9934                 return;
9935         }
9936         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9937                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9938                 return;
9939         }
9940         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9941         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9942                                        PCI_DEV_CTRL_REG);
9943         if (ret < 0) {
9944                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9945                             PCI_DEV_CTRL_REG);
9946                 return;
9947         }
9948 }
9949
9950 /*
9951  * Some registers are not reset by anything short of a global hardware
9952  * reset, so hardware initialization is needed to put those registers
9953  * into an expected initial state.
9954  */
9955 static void
9956 i40e_hw_init(struct rte_eth_dev *dev)
9957 {
9958         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9959
9960         i40e_enable_extended_tag(dev);
9961
9962         /* clear the PF Queue Filter control register */
9963         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9964
9965         /* Disable symmetric hash per port */
9966         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9967 }
9968
9969 /*
9970  * On X722 it is possible to have multiple PCTYPEs mapped to the same
9971  * flow type; however, this function returns only the highest PCTYPE
9972  * index, which is not quite correct. This is a known problem of the
9973  * i40e driver and needs to be fixed later.
9974  */
9975 enum i40e_filter_pctype
9976 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9977 {
9978         int i;
9979         uint64_t pctype_mask;
9980
9981         if (flow_type < I40E_FLOW_TYPE_MAX) {
9982                 pctype_mask = adapter->pctypes_tbl[flow_type];
9983                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9984                         if (pctype_mask & (1ULL << i))
9985                                 return (enum i40e_filter_pctype)i;
9986                 }
9987         }
9988         return I40E_FILTER_PCTYPE_INVALID;
9989 }
9990
9991 uint16_t
9992 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9993                         enum i40e_filter_pctype pctype)
9994 {
9995         uint16_t flowtype;
9996         uint64_t pctype_mask = 1ULL << pctype;
9997
9998         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9999              flowtype++) {
10000                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
10001                         return flowtype;
10002         }
10003
10004         return RTE_ETH_FLOW_UNKNOWN;
10005 }
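
/*
 * Sketch: round-tripping through the two mappings. Because several
 * PCTYPEs can share one flow type on X722, pctype -> flowtype ->
 * pctype is not guaranteed to reproduce the original value (see the
 * note above i40e_flowtype_to_pctype).
 */
static uint16_t __rte_unused
i40e_example_flowtype_roundtrip(const struct i40e_adapter *adapter)
{
	enum i40e_filter_pctype pctype =
		i40e_flowtype_to_pctype(adapter, RTE_ETH_FLOW_NONFRAG_IPV4_UDP);

	return i40e_pctype_to_flowtype(adapter, pctype);
}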
10006
10007 /*
10008  * On X710, performance numbers fall far short of expectations on recent
10009  * firmware versions. The same holds on XL710 whenever promiscuous mode is
10010  * disabled, or when promiscuous mode is enabled and the port MAC address
10011  * equals the packet's destination MAC address. The fix for this issue may
10012  * not be integrated in the following firmware version, so a workaround in
10013  * the software driver is needed. It modifies the initial values of 3
10014  * internal-only registers for both X710 and XL710. Note that the values
10015  * for X710 and XL710 could differ, and the workaround can be removed once
10016  * the issue is fixed in firmware.
10017  */
10018
10019 /* For both X710 and XL710 */
10020 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
10021 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
10022 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
10023
10024 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10025 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
10026
10027 /* For X722 */
10028 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10029 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10030
10031 /* For X710 */
10032 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
10033 /* For XL710 */
10034 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
10035 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10036
10037 /*
10038  * GL_SWR_PM_UP_THR:
10039  * The value is not impacted by the link speed; it is set according
10040  * to the total number of ports for a better pipe-monitor configuration.
10041  */
10042 static bool
10043 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10044 {
10045 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10046                 .device_id = (dev),   \
10047                 .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10048
10049 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10050                 .device_id = (dev),   \
10051                 .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10052
10053         static const struct {
10054                 uint16_t device_id;
10055                 uint32_t val;
10056         } swr_pm_table[] = {
10057                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10058                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10059                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10060                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10061                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
10062
10063                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10064                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10065                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10066                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10067                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10068                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10069                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10070         };
10071         uint32_t i;
10072
10073         if (value == NULL) {
10074                 PMD_DRV_LOG(ERR, "value is NULL");
10075                 return false;
10076         }
10077
10078         for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10079                 if (hw->device_id == swr_pm_table[i].device_id) {
10080                         *value = swr_pm_table[i].val;
10081
10082                         PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10083                                     "value - 0x%08x",
10084                                     hw->device_id, *value);
10085                         return true;
10086                 }
10087         }
10088
10089         return false;
10090 }
10091
10092 static int
10093 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10094 {
10095         enum i40e_status_code status;
10096         struct i40e_aq_get_phy_abilities_resp phy_ab;
10097         int ret = -ENOTSUP;
10098         int retries = 0;
10099
10100         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10101                                               NULL);
10102
10103         while (status) {
10104                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10105                         status);
10106                 retries++;
10107                 rte_delay_us(100000);
10108                 if (retries < 5)
10109                         status = i40e_aq_get_phy_capabilities(hw, false,
10110                                         true, &phy_ab, NULL);
10111                 else
10112                         return ret;
10113         }
10114         return 0;
10115 }
10116
10117 static void
10118 i40e_configure_registers(struct i40e_hw *hw)
10119 {
10120         static struct {
10121                 uint32_t addr;
10122                 uint64_t val;
10123         } reg_table[] = {
10124                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10125                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10126                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10127         };
10128         uint64_t reg;
10129         uint32_t i;
10130         int ret;
10131
10132         for (i = 0; i < RTE_DIM(reg_table); i++) {
10133                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10134                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10135                                 reg_table[i].val =
10136                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10137                         else /* For X710/XL710/XXV710 */
10138                                 if (hw->aq.fw_maj_ver < 6)
10139                                         reg_table[i].val =
10140                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10141                                 else
10142                                         reg_table[i].val =
10143                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10144                 }
10145
10146                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10147                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10148                                 reg_table[i].val =
10149                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10150                         else /* For X710/XL710/XXV710 */
10151                                 reg_table[i].val =
10152                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10153                 }
10154
10155                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10156                         uint32_t cfg_val;
10157
10158                         if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10159                                 PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10160                                             "GL_SWR_PM_UP_THR value fixup",
10161                                             hw->device_id);
10162                                 continue;
10163                         }
10164
10165                         reg_table[i].val = cfg_val;
10166                 }
10167
10168                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10169                                                         &reg, NULL);
10170                 if (ret < 0) {
10171                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10172                                                         reg_table[i].addr);
10173                         break;
10174                 }
10175                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10176                                                 reg_table[i].addr, reg);
10177                 if (reg == reg_table[i].val)
10178                         continue;
10179
10180                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10181                                                 reg_table[i].val, NULL);
10182                 if (ret < 0) {
10183                         PMD_DRV_LOG(ERR,
10184                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10185                                 reg_table[i].val, reg_table[i].addr);
10186                         break;
10187                 }
10188                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10189                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10190         }
10191 }
10192
10193 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10194 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10195 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10196 static int
10197 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10198 {
10199         uint32_t reg;
10200         int ret;
10201
10202         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10203                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10204                 return -EINVAL;
10205         }
10206
10207         /* Configure for double VLAN RX stripping */
10208         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10209         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10210                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10211                 ret = i40e_aq_debug_write_register(hw,
10212                                                    I40E_VSI_TSR(vsi->vsi_id),
10213                                                    reg, NULL);
10214                 if (ret < 0) {
10215                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10216                                     vsi->vsi_id);
10217                         return I40E_ERR_CONFIG;
10218                 }
10219         }
10220
10221         /* Configure for double VLAN TX insertion */
10222         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10223         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10224                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10225                 ret = i40e_aq_debug_write_register(hw,
10226                                                    I40E_VSI_L2TAGSTXVALID(
10227                                                    vsi->vsi_id), reg, NULL);
10228                 if (ret < 0) {
10229                         PMD_DRV_LOG(ERR,
10230                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
10231                                 vsi->vsi_id);
10232                         return I40E_ERR_CONFIG;
10233                 }
10234         }
10235
10236         return 0;
10237 }
10238
10239 /**
10240  * i40e_aq_add_mirror_rule
10241  * @hw: pointer to the hardware structure
10242  * @seid: VEB seid to add mirror rule to
10243  * @dst_id: destination vsi seid
10244  * @entries: Buffer which contains the entities to be mirrored
10245  * @count: number of entities contained in the buffer
10246  * @rule_id: the rule_id of the rule to be added
10247  *
10248  * Add a mirror rule for a given VEB.
10249  *
10250  **/
10251 static enum i40e_status_code
10252 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10253                         uint16_t seid, uint16_t dst_id,
10254                         uint16_t rule_type, uint16_t *entries,
10255                         uint16_t count, uint16_t *rule_id)
10256 {
10257         struct i40e_aq_desc desc;
10258         struct i40e_aqc_add_delete_mirror_rule cmd;
10259         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10260                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
10261                 &desc.params.raw;
10262         uint16_t buff_len;
10263         enum i40e_status_code status;
10264
10265         i40e_fill_default_direct_cmd_desc(&desc,
10266                                           i40e_aqc_opc_add_mirror_rule);
10267         memset(&cmd, 0, sizeof(cmd));
10268
10269         buff_len = sizeof(uint16_t) * count;
10270         desc.datalen = rte_cpu_to_le_16(buff_len);
10271         if (buff_len > 0)
10272                 desc.flags |= rte_cpu_to_le_16(
10273                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10274         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10275                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10276         cmd.num_entries = rte_cpu_to_le_16(count);
10277         cmd.seid = rte_cpu_to_le_16(seid);
10278         cmd.destination = rte_cpu_to_le_16(dst_id);
10279
10280         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10281         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
10282         PMD_DRV_LOG(INFO,
10283                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
10284                 hw->aq.asq_last_status, resp->rule_id,
10285                 resp->mirror_rules_used, resp->mirror_rules_free);
10286         *rule_id = rte_le_to_cpu_16(resp->rule_id);
10287
10288         return status;
10289 }
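
/*
 * The add-mirror-rule command is sent as an indirect AQ command: the fixed
 * command fields (rule_type/num_entries/seid/destination) travel in the
 * descriptor itself, while 'entries' -- an array of 'count' 16-bit values
 * (VLAN IDs or VSI SEIDs, depending on rule_type) -- is attached as an
 * external buffer flagged I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD.  A
 * hypothetical caller mirroring two VLANs might look like:
 *
 *     uint16_t vlans[2] = {100, 200}, rule_id;
 *     i40e_aq_add_mirror_rule(hw, veb_seid, dst_vsi_seid,
 *                             I40E_AQC_MIRROR_RULE_TYPE_VLAN,
 *                             vlans, 2, &rule_id);
 *
 * (illustrative only; the real caller is i40e_mirror_rule_set() below).
 */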
10290
10291 /**
10292  * i40e_aq_del_mirror_rule
10293  * @hw: pointer to the hardware structure
10294  * @seid: VEB seid to delete the mirror rule from
 * @rule_type: mirror rule type (I40E_AQC_MIRROR_RULE_TYPE_*)
10295  * @entries: buffer which contains the entities to be mirrored
10296  * @count: number of entities contained in the buffer
10297  * @rule_id: the rule_id of the rule to be deleted
10298  *
10299  * Delete a mirror rule for a given VEB.
10300  *
10301  **/
10302 static enum i40e_status_code
10303 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10304                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
10305                 uint16_t count, uint16_t rule_id)
10306 {
10307         struct i40e_aq_desc desc;
10308         struct i40e_aqc_add_delete_mirror_rule cmd;
10309         uint16_t buff_len = 0;
10310         enum i40e_status_code status;
10311         void *buff = NULL;
10312
10313         i40e_fill_default_direct_cmd_desc(&desc,
10314                                           i40e_aqc_opc_delete_mirror_rule);
10315         memset(&cmd, 0, sizeof(cmd));
10316         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10317                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10318                                                           I40E_AQ_FLAG_RD));
10319                 cmd.num_entries = rte_cpu_to_le_16(count); /* keep AQ fields little-endian, as in the add path */
10320                 buff_len = sizeof(uint16_t) * count;
10321                 desc.datalen = rte_cpu_to_le_16(buff_len);
10322                 buff = (void *)entries;
10323         } else {
10324                 /* the rule id is carried in the destination field when deleting a mirror rule */
10325                 cmd.destination = rte_cpu_to_le_16(rule_id);
        }
10326
10327         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10328                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10329         cmd.seid = rte_cpu_to_le_16(seid);
10330
10331         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10332         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10333
10334         return status;
10335 }
10336
10337 /**
10338  * i40e_mirror_rule_set
10339  * @dev: pointer to the ethernet device
10340  * @mirror_conf: mirror rule info
10341  * @sw_id: mirror rule's sw_id
10342  * @on: enable/disable
10343  *
10344  * Set a mirror rule.
10345  *
10346  **/
10347 static int
10348 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10349                         struct rte_eth_mirror_conf *mirror_conf,
10350                         uint8_t sw_id, uint8_t on)
10351 {
10352         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10353         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10354         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10355         struct i40e_mirror_rule *parent = NULL;
10356         uint16_t seid, dst_seid, rule_id;
10357         uint16_t i, j = 0;
10358         int ret;
10359
10360         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10361
10362         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10363                 PMD_DRV_LOG(ERR,
10364                         "mirror rule can not be configured without veb or vfs.");
10365                 return -ENOSYS;
10366         }
10367         if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
10368                 PMD_DRV_LOG(ERR, "mirror table is full.");
10369                 return -ENOSPC;
10370         }
10371         if (mirror_conf->dst_pool > pf->vf_num) {
10372                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10373                                  mirror_conf->dst_pool);
10374                 return -EINVAL;
10375         }
10376
10377         seid = pf->main_vsi->veb->seid;
10378
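        /*
         * The mirror list is kept sorted by ascending sw_id: the scan below
         * finds either an existing rule with this sw_id or the insertion
         * point, with 'parent' remembering the last rule whose index is
         * smaller so a new rule can be linked after it further down.
         */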
10379         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10380                 if (sw_id <= it->index) {
10381                         mirr_rule = it;
10382                         break;
10383                 }
10384                 parent = it;
10385         }
10386         if (mirr_rule && sw_id == mirr_rule->index) {
10387                 if (on) {
10388                         PMD_DRV_LOG(ERR, "mirror rule exists.");
10389                         return -EEXIST;
10390                 } else {
10391                         ret = i40e_aq_del_mirror_rule(hw, seid,
10392                                         mirr_rule->rule_type,
10393                                         mirr_rule->entries,
10394                                         mirr_rule->num_entries, mirr_rule->id);
10395                         if (ret < 0) {
10396                                 PMD_DRV_LOG(ERR,
10397                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
10398                                         ret, hw->aq.asq_last_status);
10399                                 return -ENOSYS;
10400                         }
10401                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10402                         rte_free(mirr_rule);
10403                         pf->nb_mirror_rule--;
10404                         return 0;
10405                 }
10406         } else if (!on) {
10407                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10408                 return -ENOENT;
10409         }
10410
10411         mirr_rule = rte_zmalloc("i40e_mirror_rule",
10412                                 sizeof(struct i40e_mirror_rule), 0);
10413         if (!mirr_rule) {
10414                 PMD_DRV_LOG(ERR, "failed to allocate memory");
10415                 return I40E_ERR_NO_MEMORY;
10416         }
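        /*
         * Translate the ethdev mirror type into an AQ rule type and collect
         * the matching entries (VLAN IDs or VSI SEIDs) into mirr_rule->entries:
         *   ETH_MIRROR_VLAN              -> ..._RULE_TYPE_VLAN
         *   ETH_MIRROR_VIRTUAL_POOL_UP   -> ..._RULE_TYPE_VPORT_EGRESS
         *   ETH_MIRROR_VIRTUAL_POOL_DOWN -> ..._RULE_TYPE_VPORT_INGRESS
         *   ETH_MIRROR_UPLINK_PORT       -> ..._RULE_TYPE_ALL_EGRESS
         *   ETH_MIRROR_DOWNLINK_PORT     -> ..._RULE_TYPE_ALL_INGRESS
         */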
10417         switch (mirror_conf->rule_type) {
10418         case ETH_MIRROR_VLAN:
10419                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10420                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10421                                 mirr_rule->entries[j] =
10422                                         mirror_conf->vlan.vlan_id[i];
10423                                 j++;
10424                         }
10425                 }
10426                 if (j == 0) {
10427                         PMD_DRV_LOG(ERR, "vlan is not specified.");
10428                         rte_free(mirr_rule);
10429                         return -EINVAL;
10430                 }
10431                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10432                 break;
10433         case ETH_MIRROR_VIRTUAL_POOL_UP:
10434         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10435                 /* check if the specified pool mask is out of range */
10436                 if (mirror_conf->pool_mask >= (uint64_t)(1ULL << (pf->vf_num + 1))) {
10437                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
10438                         rte_free(mirr_rule);
10439                         return -EINVAL;
10440                 }
10441                 for (i = 0, j = 0; i < pf->vf_num; i++) {
10442                         if (mirror_conf->pool_mask & (1ULL << i)) {
10443                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10444                                 j++;
10445                         }
10446                 }
10447                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10448                         /* add pf vsi to entries */
10449                         mirr_rule->entries[j] = pf->main_vsi_seid;
10450                         j++;
10451                 }
10452                 if (j == 0) {
10453                         PMD_DRV_LOG(ERR, "pool is not specified.");
10454                         rte_free(mirr_rule);
10455                         return -EINVAL;
10456                 }
10457                 /* in the AQ commands, egress and ingress are relative to the switch, not the port */
10458                 mirr_rule->rule_type =
10459                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10460                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10461                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10462                 break;
10463         case ETH_MIRROR_UPLINK_PORT:
10464                 /* in the AQ commands, egress and ingress are relative to the switch, not the port */
10465                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10466                 break;
10467         case ETH_MIRROR_DOWNLINK_PORT:
10468                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10469                 break;
10470         default:
10471                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10472                         mirror_conf->rule_type);
10473                 rte_free(mirr_rule);
10474                 return -EINVAL;
10475         }
10476
10477         /* If the dst_pool is equal to vf_num, consider it as PF */
10478         if (mirror_conf->dst_pool == pf->vf_num)
10479                 dst_seid = pf->main_vsi_seid;
10480         else
10481                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10482
10483         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10484                                       mirr_rule->rule_type, mirr_rule->entries,
10485                                       j, &rule_id);
10486         if (ret < 0) {
10487                 PMD_DRV_LOG(ERR,
10488                         "failed to add mirror rule: ret = %d, aq_err = %d.",
10489                         ret, hw->aq.asq_last_status);
10490                 rte_free(mirr_rule);
10491                 return -ENOSYS;
10492         }
10493
10494         mirr_rule->index = sw_id;
10495         mirr_rule->num_entries = j;
10496         mirr_rule->id = rule_id;
10497         mirr_rule->dst_vsi_seid = dst_seid;
10498
10499         if (parent)
10500                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10501         else
10502                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10503
10504         pf->nb_mirror_rule++;
10505         return 0;
10506 }
10507
10508 /**
10509  * i40e_mirror_rule_reset
10510  * @dev: pointer to the device
10511  * @sw_id: mirror rule's sw_id
10512  *
10513  * Reset (delete) a mirror rule by its sw_id.
10514  *
10515  **/
10516 static int
10517 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10518 {
10519         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10520         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10521         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10522         uint16_t seid;
10523         int ret;
10524
10525         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10526
10527         seid = pf->main_vsi->veb->seid;
10528
10529         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10530                 if (sw_id == it->index) {
10531                         mirr_rule = it;
10532                         break;
10533                 }
10534         }
10535         if (mirr_rule) {
10536                 ret = i40e_aq_del_mirror_rule(hw, seid,
10537                                 mirr_rule->rule_type,
10538                                 mirr_rule->entries,
10539                                 mirr_rule->num_entries, mirr_rule->id);
10540                 if (ret < 0) {
10541                         PMD_DRV_LOG(ERR,
10542                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
10543                                 ret, hw->aq.asq_last_status);
10544                         return -ENOSYS;
10545                 }
10546                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10547                 rte_free(mirr_rule);
10548                 pf->nb_mirror_rule--;
10549         } else {
10550                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10551                 return -ENOENT;
10552         }
10553         return 0;
10554 }
10555
10556 static uint64_t
10557 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10558 {
10559         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10560         uint64_t systim_cycles;
10561
10562         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10563         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10564                         << 32;
10565
10566         return systim_cycles;
10567 }
10568
10569 static uint64_t
10570 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10571 {
10572         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10573         uint64_t rx_tstamp;
10574
10575         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10576         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10577                         << 32;
10578
10579         return rx_tstamp;
10580 }
10581
10582 static uint64_t
10583 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10584 {
10585         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10586         uint64_t tx_tstamp;
10587
10588         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10589         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10590                         << 32;
10591
10592         return tx_tstamp;
10593 }
10594
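/*
 * PTP clocking note: PRTTSYN_TIME_L/H form a free-running 64-bit counter
 * that advances by the value programmed into PRTTSYN_INC_L/H on every
 * internal clock cycle.  Since that clock rate follows the negotiated link
 * speed, a per-speed increment constant (I40E_PTP_*GB_INCVAL) is selected
 * below so the counter effectively counts nanoseconds; the rte_timecounter
 * instances are set up with cc_shift = 0, i.e. cycle deltas are consumed
 * as ns directly.  For an unrecognized speed the increment stays zero,
 * which leaves the PHC stopped until the link is renegotiated.
 */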
10595 static void
10596 i40e_start_timecounters(struct rte_eth_dev *dev)
10597 {
10598         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10599         struct i40e_adapter *adapter = dev->data->dev_private;
10600         struct rte_eth_link link;
10601         uint32_t tsync_inc_l;
10602         uint32_t tsync_inc_h;
10603
10604         /* Get current link speed. */
10605         i40e_dev_link_update(dev, 1);
10606         rte_eth_linkstatus_get(dev, &link);
10607
10608         switch (link.link_speed) {
10609         case ETH_SPEED_NUM_40G:
10610         case ETH_SPEED_NUM_25G:
10611                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10612                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10613                 break;
10614         case ETH_SPEED_NUM_10G:
10615                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10616                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10617                 break;
10618         case ETH_SPEED_NUM_1G:
10619                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10620                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10621                 break;
10622         default:
10623                 tsync_inc_l = 0x0;
10624                 tsync_inc_h = 0x0;
10625         }
10626
10627         /* Set the timesync increment value. */
10628         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10629         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10630
10631         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10632         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10633         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10634
10635         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10636         adapter->systime_tc.cc_shift = 0;
10637         adapter->systime_tc.nsec_mask = 0;
10638
10639         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10640         adapter->rx_tstamp_tc.cc_shift = 0;
10641         adapter->rx_tstamp_tc.nsec_mask = 0;
10642
10643         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10644         adapter->tx_tstamp_tc.cc_shift = 0;
10645         adapter->tx_tstamp_tc.nsec_mask = 0;
10646 }
10647
10648 static int
10649 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10650 {
10651         struct i40e_adapter *adapter = dev->data->dev_private;
10652
10653         adapter->systime_tc.nsec += delta;
10654         adapter->rx_tstamp_tc.nsec += delta;
10655         adapter->tx_tstamp_tc.nsec += delta;
10656
10657         return 0;
10658 }
10659
10660 static int
10661 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10662 {
10663         uint64_t ns;
10664         struct i40e_adapter *adapter = dev->data->dev_private;
10665
10666         ns = rte_timespec_to_ns(ts);
10667
10668         /* Set the timecounters to a new value. */
10669         adapter->systime_tc.nsec = ns;
10670         adapter->rx_tstamp_tc.nsec = ns;
10671         adapter->tx_tstamp_tc.nsec = ns;
10672
10673         return 0;
10674 }
10675
10676 static int
10677 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10678 {
10679         uint64_t ns, systime_cycles;
10680         struct i40e_adapter *adapter = dev->data->dev_private;
10681
10682         systime_cycles = i40e_read_systime_cyclecounter(dev);
10683         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10684         *ts = rte_ns_to_timespec(ns);
10685
10686         return 0;
10687 }
10688
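/*
 * The adjust/write/read handlers above touch only the software
 * rte_timecounter state; the hardware counter keeps free-running.  A
 * minimal sketch of the expected call order from an application (PTP
 * offload must be enabled first):
 *
 *     rte_eth_timesync_enable(port_id);
 *     ...
 *     rte_eth_timesync_read_rx_timestamp(port_id, &ts, idx);
 *
 * where 'idx' selects one of the four PRTTSYN_RXTIME latch registers and
 * is taken from the low two bits of the 'flags' argument in the Rx
 * handler further below.
 */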
10689 static int
10690 i40e_timesync_enable(struct rte_eth_dev *dev)
10691 {
10692         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10693         uint32_t tsync_ctl_l;
10694         uint32_t tsync_ctl_h;
10695
10696         /* Stop the timesync system time. */
10697         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10698         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10699         /* Reset the timesync system time value. */
10700         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10701         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10702
10703         i40e_start_timecounters(dev);
10704
10705         /* Clear timesync registers. */
10706         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10707         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10708         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10709         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10710         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10711         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10712
10713         /* Enable timestamping of PTP packets. */
10714         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10715         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10716
10717         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10718         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10719         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10720
10721         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10722         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10723
10724         return 0;
10725 }
10726
10727 static int
10728 i40e_timesync_disable(struct rte_eth_dev *dev)
10729 {
10730         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10731         uint32_t tsync_ctl_l;
10732         uint32_t tsync_ctl_h;
10733
10734         /* Disable timestamping of transmitted PTP packets. */
10735         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10736         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10737
10738         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10739         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10740
10741         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10742         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10743
10744         /* Reset the timesync increment value. */
10745         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10746         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10747
10748         return 0;
10749 }
10750
10751 static int
10752 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10753                                 struct timespec *timestamp, uint32_t flags)
10754 {
10755         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10756         struct i40e_adapter *adapter = dev->data->dev_private;
10757         uint32_t sync_status;
10758         uint32_t index = flags & 0x03;
10759         uint64_t rx_tstamp_cycles;
10760         uint64_t ns;
10761
10762         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10763         if ((sync_status & (1 << index)) == 0)
10764                 return -EINVAL;
10765
10766         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10767         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10768         *timestamp = rte_ns_to_timespec(ns);
10769
10770         return 0;
10771 }
10772
10773 static int
10774 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10775                                 struct timespec *timestamp)
10776 {
10777         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10778         struct i40e_adapter *adapter = dev->data->dev_private;
10779         uint32_t sync_status;
10780         uint64_t tx_tstamp_cycles;
10781         uint64_t ns;
10782
10783         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10784         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10785                 return -EINVAL;
10786
10787         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10788         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10789         *timestamp = rte_ns_to_timespec(ns);
10790
10791         return 0;
10792 }
10793
10794 /*
10795  * i40e_parse_dcb_configure - parse dcb configure from user
10796  * @dev: the device being configured
10797  * @dcb_cfg: pointer to the parsed DCB configuration
10798  * @tc_map: bit map of enabled traffic classes
10799  *
10800  * Returns 0 on success, negative value on failure
10801  */
10802 static int
10803 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10804                          struct i40e_dcbx_config *dcb_cfg,
10805                          uint8_t *tc_map)
10806 {
10807         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10808         uint8_t i, tc_bw, bw_lf;
10809
10810         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10811
10812         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10813         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10814                 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
10815                 return -EINVAL;
10816         }
10817
10818         /* assume each tc has the same bw */
10819         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10820         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10821                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10822         /* distribute the remainder so that the sum of tcbw equals 100 */
10823         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10824         for (i = 0; i < bw_lf; i++)
10825                 dcb_cfg->etscfg.tcbwtable[i]++;
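        /*
         * Worked example: with nb_tcs = 3, tc_bw = 100 / 3 = 33 and
         * bw_lf = 1, so the table becomes {34, 33, 33} and sums to
         * exactly 100.
         */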
10826
10827         /* assume each tc has the same Transmission Selection Algorithm */
10828         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10829                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10830
10831         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10832                 dcb_cfg->etscfg.prioritytable[i] =
10833                                 dcb_rx_conf->dcb_tc[i];
10834
10835         /* FW needs one App to configure HW */
10836         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10837         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10838         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10839         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10840
10841         if (dcb_rx_conf->nb_tcs == 0)
10842                 *tc_map = 1; /* tc0 only */
10843         else
10844                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10845
10846         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10847                 dcb_cfg->pfc.willing = 0;
10848                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10849                 dcb_cfg->pfc.pfcenable = *tc_map;
10850         }
10851         return 0;
10852 }
10853
10854
10855 static enum i40e_status_code
10856 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10857                               struct i40e_aqc_vsi_properties_data *info,
10858                               uint8_t enabled_tcmap)
10859 {
10860         enum i40e_status_code ret;
10861         int i, total_tc = 0;
10862         uint16_t qpnum_per_tc, bsf, qp_idx;
10863         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10864         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10865         uint16_t used_queues;
10866
10867         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10868         if (ret != I40E_SUCCESS)
10869                 return ret;
10870
10871         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10872                 if (enabled_tcmap & (1 << i))
10873                         total_tc++;
10874         }
10875         if (total_tc == 0)
10876                 total_tc = 1;
10877         vsi->enabled_tc = enabled_tcmap;
10878
10879         /* different VSI has different queues assigned */
10880         if (vsi->type == I40E_VSI_MAIN)
10881                 used_queues = dev_data->nb_rx_queues -
10882                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10883         else if (vsi->type == I40E_VSI_VMDQ2)
10884                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10885         else {
10886                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10887                 return I40E_ERR_NO_AVAILABLE_VSI;
10888         }
10889
10890         /* Number of queues per enabled TC */
10891         qpnum_per_tc = used_queues / total_tc;
10892         if (qpnum_per_tc == 0) {
10893                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
10894                 return I40E_ERR_INVALID_QP_ID;
10895         }
10896         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10897                                 I40E_MAX_Q_PER_TC);
10898         bsf = rte_bsf32(qpnum_per_tc);
10899
10900         /**
10901          * Configure TC and queue mapping parameters. For each enabled TC,
10902          * allocate qpnum_per_tc queues to that traffic class; disabled TCs
10903          * are served by the default queue.
10904          */
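        /*
         * Worked example: used_queues = 8 with total_tc = 2 gives
         * qpnum_per_tc = 4 and bsf = 2, so TC0 is encoded as offset 0 and
         * TC1 as offset 4, each with 2^2 queues, via the
         * I40E_AQ_VSI_TC_QUE_* shifts below.
         */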
10905         qp_idx = 0;
10906         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10907                 if (vsi->enabled_tc & (1 << i)) {
10908                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10909                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10910                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10911                         qp_idx += qpnum_per_tc;
10912                 } else
10913                         info->tc_mapping[i] = 0;
10914         }
10915
10916         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
10917         if (vsi->type == I40E_VSI_SRIOV) {
10918                 info->mapping_flags |=
10919                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10920                 for (i = 0; i < vsi->nb_qps; i++)
10921                         info->queue_mapping[i] =
10922                                 rte_cpu_to_le_16(vsi->base_queue + i);
10923         } else {
10924                 info->mapping_flags |=
10925                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10926                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10927         }
10928         info->valid_sections |=
10929                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10930
10931         return I40E_SUCCESS;
10932 }
10933
10934 /*
10935  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10936  * @veb: VEB to be configured
10937  * @tc_map: enabled TC bitmap
10938  *
10939  * Returns 0 on success, negative value on failure
10940  */
10941 static enum i40e_status_code
10942 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10943 {
10944         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10945         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10946         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10947         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10948         enum i40e_status_code ret = I40E_SUCCESS;
10949         int i;
10950         uint32_t bw_max;
10951
10952         /* Check if enabled_tc is same as existing or new TCs */
10953         if (veb->enabled_tc == tc_map)
10954                 return ret;
10955
10956         /* configure tc bandwidth */
10957         memset(&veb_bw, 0, sizeof(veb_bw));
10958         veb_bw.tc_valid_bits = tc_map;
10959         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10960         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10961                 if (tc_map & BIT_ULL(i))
10962                         veb_bw.tc_bw_share_credits[i] = 1;
10963         }
10964         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10965                                                    &veb_bw, NULL);
10966         if (ret) {
10967                 PMD_INIT_LOG(ERR,
10968                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10969                         hw->aq.asq_last_status);
10970                 return ret;
10971         }
10972
10973         memset(&ets_query, 0, sizeof(ets_query));
10974         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10975                                                    &ets_query, NULL);
10976         if (ret != I40E_SUCCESS) {
10977                 PMD_DRV_LOG(ERR,
10978                         "Failed to get switch_comp ETS configuration %u",
10979                         hw->aq.asq_last_status);
10980                 return ret;
10981         }
10982         memset(&bw_query, 0, sizeof(bw_query));
10983         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10984                                                   &bw_query, NULL);
10985         if (ret != I40E_SUCCESS) {
10986                 PMD_DRV_LOG(ERR,
10987                         "Failed to get switch_comp bandwidth configuration %u",
10988                         hw->aq.asq_last_status);
10989                 return ret;
10990         }
10991
10992         /* store and print out BW info */
10993         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10994         veb->bw_info.bw_max = ets_query.tc_bw_max;
10995         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10996         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10997         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10998                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10999                      I40E_16_BIT_WIDTH);
11000         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11001                 veb->bw_info.bw_ets_share_credits[i] =
11002                                 bw_query.tc_bw_share_credits[i];
11003                 veb->bw_info.bw_ets_credits[i] =
11004                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
11005                 /* 4 bits per TC, 4th bit is reserved */
11006                 veb->bw_info.bw_ets_max[i] =
11007                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
11008                                   RTE_LEN2MASK(3, uint8_t));
11009                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
11010                             veb->bw_info.bw_ets_share_credits[i]);
11011                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
11012                             veb->bw_info.bw_ets_credits[i]);
11013                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
11014                             veb->bw_info.bw_ets_max[i]);
11015         }
11016
11017         veb->enabled_tc = tc_map;
11018
11019         return ret;
11020 }
11021
11022
11023 /*
11024  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
11025  * @vsi: VSI to be configured
11026  * @tc_map: enabled TC bitmap
11027  *
11028  * Returns 0 on success, negative value on failure
11029  */
11030 static enum i40e_status_code
11031 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
11032 {
11033         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
11034         struct i40e_vsi_context ctxt;
11035         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
11036         enum i40e_status_code ret = I40E_SUCCESS;
11037         int i;
11038
11039         /* Check if enabled_tc is same as existing or new TCs */
11040         if (vsi->enabled_tc == tc_map)
11041                 return ret;
11042
11043         /* configure tc bandwidth */
11044         memset(&bw_data, 0, sizeof(bw_data));
11045         bw_data.tc_valid_bits = tc_map;
11046         /* Enable ETS TCs with equal BW Share for now across all VSIs */
11047         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11048                 if (tc_map & BIT_ULL(i))
11049                         bw_data.tc_bw_credits[i] = 1;
11050         }
11051         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
11052         if (ret) {
11053                 PMD_INIT_LOG(ERR,
11054                         "AQ command Config VSI BW allocation per TC failed = %d",
11055                         hw->aq.asq_last_status);
11056                 goto out;
11057         }
11058         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
11059                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
11060
11061         /* Update Queue Pairs Mapping for currently enabled UPs */
11062         ctxt.seid = vsi->seid;
11063         ctxt.pf_num = hw->pf_id;
11064         ctxt.vf_num = 0;
11065         ctxt.uplink_seid = vsi->uplink_seid;
11066         ctxt.info = vsi->info;
11067         i40e_get_cap(hw);
11068         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
11069         if (ret)
11070                 goto out;
11071
11072         /* Update the VSI after updating the VSI queue-mapping information */
11073         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11074         if (ret) {
11075                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
11076                         hw->aq.asq_last_status);
11077                 goto out;
11078         }
11079         /* update the local VSI info with updated queue map */
11080         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
11081                                         sizeof(vsi->info.tc_mapping));
11082         rte_memcpy(&vsi->info.queue_mapping,
11083                         &ctxt.info.queue_mapping,
11084                 sizeof(vsi->info.queue_mapping));
11085         vsi->info.mapping_flags = ctxt.info.mapping_flags;
11086         vsi->info.valid_sections = 0;
11087
11088         /* query and update current VSI BW information */
11089         ret = i40e_vsi_get_bw_config(vsi);
11090         if (ret) {
11091                 PMD_INIT_LOG(ERR,
11092                          "Failed updating vsi bw info, err %s aq_err %s",
11093                          i40e_stat_str(hw, ret),
11094                          i40e_aq_str(hw, hw->aq.asq_last_status));
11095                 goto out;
11096         }
11097
11098         vsi->enabled_tc = tc_map;
11099
11100 out:
11101         return ret;
11102 }
11103
11104 /*
11105  * i40e_dcb_hw_configure - program the dcb setting to hw
11106  * @pf: pf the configuration is taken on
11107  * @new_cfg: new configuration
11108  * @tc_map: enabled TC bitmap
11109  *
11110  * Returns 0 on success, negative value on failure
11111  */
11112 static enum i40e_status_code
11113 i40e_dcb_hw_configure(struct i40e_pf *pf,
11114                       struct i40e_dcbx_config *new_cfg,
11115                       uint8_t tc_map)
11116 {
11117         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11118         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11119         struct i40e_vsi *main_vsi = pf->main_vsi;
11120         struct i40e_vsi_list *vsi_list;
11121         enum i40e_status_code ret;
11122         int i;
11123         uint32_t val;
11124
11125         /* Use the FW API only if FW >= v4.4 */
11126         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11127               (hw->aq.fw_maj_ver >= 5))) {
11128                 PMD_INIT_LOG(ERR,
11129                         "FW < v4.4, can not use FW LLDP API to configure DCB");
11130                 return I40E_ERR_FIRMWARE_API_VERSION;
11131         }
11132
11133         /* Check whether reconfiguration is needed */
11134         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11135                 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
11136                 return I40E_SUCCESS;
11137         }
11138
11139         /* Copy the new config to the current config */
11140         *old_cfg = *new_cfg;
11141         old_cfg->etsrec = old_cfg->etscfg;
11142         ret = i40e_set_dcb_config(hw);
11143         if (ret) {
11144                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11145                          i40e_stat_str(hw, ret),
11146                          i40e_aq_str(hw, hw->aq.asq_last_status));
11147                 return ret;
11148         }
11149         /* set receive Arbiter to RR mode and ETS scheme by default */
11150         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11151                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11152                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11153                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11154                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
11155                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11156                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11157                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11158                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11159                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11160                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11161                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11162                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11163         }
11164         /* get local mib to check whether it is configured correctly */
11165         /* IEEE mode */
11166         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11167         /* Get Local DCB Config */
11168         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11169                                      &hw->local_dcbx_config);
11170
11171         /* if Veb is created, need to update TC of it at first */
11172         if (main_vsi->veb) {
11173                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11174                 if (ret)
11175                         PMD_INIT_LOG(WARNING,
11176                                  "Failed configuring TC for VEB seid=%d",
11177                                  main_vsi->veb->seid);
11178         }
11179         /* Update each VSI */
11180         i40e_vsi_config_tc(main_vsi, tc_map);
11181         if (main_vsi->veb) {
11182                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11183                         /* Besides the main VSI and VMDQ VSIs, only enable
11184                          * the default TC for other VSIs
11185                          */
11186                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11187                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11188                                                          tc_map);
11189                         else
11190                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11191                                                          I40E_DEFAULT_TCMAP);
11192                         if (ret)
11193                                 PMD_INIT_LOG(WARNING,
11194                                         "Failed configuring TC for VSI seid=%d",
11195                                         vsi_list->vsi->seid);
11196                         /* continue */
11197                 }
11198         }
11199         return I40E_SUCCESS;
11200 }
11201
11202 /*
11203  * i40e_dcb_init_configure - initial dcb config
11204  * @dev: device being configured
11205  * @sw_dcb: indicate whether dcb is sw configured or hw offload
11206  *
11207  * Returns 0 on success, negative value on failure
11208  */
11209 int
11210 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11211 {
11212         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11213         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11214         int i, ret = 0;
11215
11216         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11217                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11218                 return -ENOTSUP;
11219         }
11220
11221         /* DCB initialization:
11222          * Update DCB configuration from the Firmware and configure
11223          * LLDP MIB change event.
11224          */
11225         if (sw_dcb == TRUE) {
11226                 /* Stopping lldp is necessary for DPDK, but it will cause
11227                  * DCB init failed. For i40e_init_dcb(), the prerequisite
11228                  * for successful initialization of DCB is that LLDP is
11229                  * enabled. So it is needed to start lldp before DCB init
11230                  * and stop it after initialization.
11231                  */
11232                 ret = i40e_aq_start_lldp(hw, true, NULL);
11233                 if (ret != I40E_SUCCESS)
11234                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11235
11236                 ret = i40e_init_dcb(hw, true);
11237                 /* If lldp agent is stopped, the return value from
11238                  * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
11239                  * adminq status. Otherwise, it should return success.
11240                  */
11241                 if (ret == I40E_SUCCESS ||
11242                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
11243                         memset(&hw->local_dcbx_config, 0,
11244                                 sizeof(struct i40e_dcbx_config));
11245                         /* set dcb default configuration */
11246                         hw->local_dcbx_config.etscfg.willing = 0;
11247                         hw->local_dcbx_config.etscfg.maxtcs = 0;
11248                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11249                         hw->local_dcbx_config.etscfg.tsatable[0] =
11250                                                 I40E_IEEE_TSA_ETS;
11251                         /* all UPs mapping to TC0 */
11252                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11253                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11254                         hw->local_dcbx_config.etsrec =
11255                                 hw->local_dcbx_config.etscfg;
11256                         hw->local_dcbx_config.pfc.willing = 0;
11257                         hw->local_dcbx_config.pfc.pfccap =
11258                                                 I40E_MAX_TRAFFIC_CLASS;
11259                         /* FW needs one App to configure HW */
11260                         hw->local_dcbx_config.numapps = 1;
11261                         hw->local_dcbx_config.app[0].selector =
11262                                                 I40E_APP_SEL_ETHTYPE;
11263                         hw->local_dcbx_config.app[0].priority = 3;
11264                         hw->local_dcbx_config.app[0].protocolid =
11265                                                 I40E_APP_PROTOID_FCOE;
11266                         ret = i40e_set_dcb_config(hw);
11267                         if (ret) {
11268                                 PMD_INIT_LOG(ERR,
11269                                         "default dcb config fails. err = %d, aq_err = %d.",
11270                                         ret, hw->aq.asq_last_status);
11271                                 return -ENOSYS;
11272                         }
11273                 } else {
11274                         PMD_INIT_LOG(ERR,
11275                                 "DCB initialization in FW fails, err = %d, aq_err = %d.",
11276                                 ret, hw->aq.asq_last_status);
11277                         return -ENOTSUP;
11278                 }
11279
11280                 if (i40e_need_stop_lldp(dev)) {
11281                         ret = i40e_aq_stop_lldp(hw, true, true, NULL);
11282                         if (ret != I40E_SUCCESS)
11283                                 PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
11284                 }
11285         } else {
11286                 ret = i40e_aq_start_lldp(hw, true, NULL);
11287                 if (ret != I40E_SUCCESS)
11288                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11289
11290                 ret = i40e_init_dcb(hw, true);
11291                 if (!ret) {
11292                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11293                                 PMD_INIT_LOG(ERR,
11294                                         "HW doesn't support DCBX offload.");
11295                                 return -ENOTSUP;
11296                         }
11297                 } else {
11298                         PMD_INIT_LOG(ERR,
11299                                 "DCBX configuration failed, err = %d, aq_err = %d.",
11300                                 ret, hw->aq.asq_last_status);
11301                         return -ENOTSUP;
11302                 }
11303         }
11304         return 0;
11305 }
11306
11307 /*
11308  * i40e_dcb_setup - setup dcb related config
11309  * @dev: device being configured
11310  *
11311  * Returns 0 on success, negative value on failure
11312  */
11313 static int
11314 i40e_dcb_setup(struct rte_eth_dev *dev)
11315 {
11316         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11317         struct i40e_dcbx_config dcb_cfg;
11318         uint8_t tc_map = 0;
11319         int ret = 0;
11320
11321         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11322                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11323                 return -ENOTSUP;
11324         }
11325
11326         if (pf->vf_num != 0)
11327                 PMD_INIT_LOG(DEBUG, "DCB only works on the PF and VMDQ VSIs.");
11328
11329         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11330         if (ret) {
11331                 PMD_INIT_LOG(ERR, "invalid dcb config");
11332                 return -EINVAL;
11333         }
11334         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11335         if (ret) {
11336                 PMD_INIT_LOG(ERR, "dcb sw configure fails");
11337                 return -ENOSYS;
11338         }
11339
11340         return 0;
11341 }
11342
11343 static int
11344 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11345                       struct rte_eth_dcb_info *dcb_info)
11346 {
11347         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11348         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11349         struct i40e_vsi *vsi = pf->main_vsi;
11350         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11351         uint16_t bsf, tc_mapping;
11352         int i, j = 0;
11353
11354         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11355                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11356         else
11357                 dcb_info->nb_tcs = 1;
11358         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11359                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11360         for (i = 0; i < dcb_info->nb_tcs; i++)
11361                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11362
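        /*
         * Each 16-bit tc_mapping word packs the TC's first queue index and a
         * power-of-two queue count (the inverse of the encoding performed in
         * i40e_vsi_update_queue_mapping()); e.g. an offset field of 4 with a
         * queue-number field of 2 decodes to base = 4, nb_queue = 1 << 2 = 4.
         */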
11363         /* get queue mapping if vmdq is disabled */
11364         if (!pf->nb_cfg_vmdq_vsi) {
11365                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11366                         if (!(vsi->enabled_tc & (1 << i)))
11367                                 continue;
11368                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11369                         dcb_info->tc_queue.tc_rxq[j][i].base =
11370                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11371                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11372                         dcb_info->tc_queue.tc_txq[j][i].base =
11373                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11374                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11375                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11376                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11377                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11378                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11379                 }
11380                 return 0;
11381         }
11382
11383         /* get queue mapping if vmdq is enabled */
11384         do {
11385                 vsi = pf->vmdq[j].vsi;
11386                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11387                         if (!(vsi->enabled_tc & (1 << i)))
11388                                 continue;
11389                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11390                         dcb_info->tc_queue.tc_rxq[j][i].base =
11391                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11392                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11393                         dcb_info->tc_queue.tc_txq[j][i].base =
11394                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11395                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11396                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11397                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11398                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11399                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11400                 }
11401                 j++;
11402         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11403         return 0;
11404 }
11405
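/*
 * Rx interrupt control: vector 0 (I40E_MISC_VEC_ID) is the shared
 * admin/misc vector driven through PFINT_DYN_CTL0, while queue vectors
 * start at I40E_RX_VEC_START and use the per-vector PFINT_DYN_CTLN
 * registers.  Enabling writes INTENA | CLEARPBA with the ITR index field
 * set to its "no ITR update" value; disabling rewrites the register
 * without INTENA, leaving the interrupt masked.
 */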
11406 static int
11407 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11408 {
11409         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11410         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11411         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11412         uint16_t msix_intr;
11413
11414         msix_intr = intr_handle->intr_vec[queue_id];
11415         if (msix_intr == I40E_MISC_VEC_ID)
11416                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11417                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
11418                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11419                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11420         else
11421                 I40E_WRITE_REG(hw,
11422                                I40E_PFINT_DYN_CTLN(msix_intr -
11423                                                    I40E_RX_VEC_START),
11424                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
11425                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11426                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11427
11428         I40E_WRITE_FLUSH(hw);
11429         rte_intr_ack(intr_handle);
11430
11431         return 0;
11432 }
11433
11434 static int
11435 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11436 {
11437         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11438         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11439         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11440         uint16_t msix_intr;
11441
11442         msix_intr = intr_handle->intr_vec[queue_id];
11443         if (msix_intr == I40E_MISC_VEC_ID)
11444                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11445                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11446         else
11447                 I40E_WRITE_REG(hw,
11448                                I40E_PFINT_DYN_CTLN(msix_intr -
11449                                                    I40E_RX_VEC_START),
11450                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11451         I40E_WRITE_FLUSH(hw);
11452
11453         return 0;
11454 }
11455
11456 /**
11457  * Check whether a register offset is valid to read.
11458  * The register ranges below are readable on X722 only:
11459  * 0x2b800--0x2bb00
11460  * 0x38700--0x38a00
11461  * 0x3d800--0x3db00
11462  * 0x208e00--0x209000
11463  * 0x20be00--0x20c000
11464  * 0x263c00--0x264000
11465  * 0x265c00--0x266000
11466  */
11467 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
11468 {
11469         if ((type != I40E_MAC_X722) &&
11470             ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
11471              (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
11472              (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
11473              (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
11474              (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
11475              (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
11476              (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
11477                 return 0;
11478         else
11479                 return 1;
11480 }
11481
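/*
 * i40e_get_regs() fills regs->data in two passes: registers that must go
 * through the admin queue (i40e_regs_adminq, read via i40e_read_rx_ctl())
 * and registers readable by plain MMIO (i40e_regs_others).  Each value is
 * stored at data[offset >> 2], so the dump is indexed by 32-bit register
 * offset, and X722-only offsets are zeroed on other MAC types as filtered
 * by i40e_valid_regs() above.
 */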
11482 static int i40e_get_regs(struct rte_eth_dev *dev,
11483                          struct rte_dev_reg_info *regs)
11484 {
11485         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11486         uint32_t *ptr_data = regs->data;
11487         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11488         const struct i40e_reg_info *reg_info;
11489
11490         if (ptr_data == NULL) {
11491                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11492                 regs->width = sizeof(uint32_t);
11493                 return 0;
11494         }
11495
11496         /* The first few registers have to be read using AQ operations */
11497         reg_idx = 0;
11498         while (i40e_regs_adminq[reg_idx].name) {
11499                 reg_info = &i40e_regs_adminq[reg_idx++];
11500                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11501                         for (arr_idx2 = 0;
11502                                         arr_idx2 <= reg_info->count2;
11503                                         arr_idx2++) {
11504                                 reg_offset = arr_idx * reg_info->stride1 +
11505                                         arr_idx2 * reg_info->stride2;
11506                                 reg_offset += reg_info->base_addr;
11507                                 ptr_data[reg_offset >> 2] =
11508                                         i40e_read_rx_ctl(hw, reg_offset);
11509                         }
11510         }
11511
11512         /* The remaining registers can be read using primitives */
11513         reg_idx = 0;
11514         while (i40e_regs_others[reg_idx].name) {
11515                 reg_info = &i40e_regs_others[reg_idx++];
11516                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11517                         for (arr_idx2 = 0;
11518                                         arr_idx2 <= reg_info->count2;
11519                                         arr_idx2++) {
11520                                 reg_offset = arr_idx * reg_info->stride1 +
11521                                         arr_idx2 * reg_info->stride2;
11522                                 reg_offset += reg_info->base_addr;
11523                                 if (!i40e_valid_regs(hw->mac.type, reg_offset))
11524                                         ptr_data[reg_offset >> 2] = 0;
11525                                 else
11526                                         ptr_data[reg_offset >> 2] =
11527                                                 I40E_READ_REG(hw, reg_offset);
11528                         }
11529         }
11530
11531         return 0;
11532 }
11533
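/* Report the NVM (shadow RAM) size in bytes; sr_size is in words. */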
11534 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11535 {
11536         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11537
11538         /* Convert word count to byte count */
11539         return hw->nvm.sr_size << 1;
11540 }
11541
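/* Read NVM contents. The ethdev API is byte based while the NVM is
 * word based, hence the offset and length shifts below.
 */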
11542 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11543                            struct rte_dev_eeprom_info *eeprom)
11544 {
11545         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11546         uint16_t *data = eeprom->data;
11547         uint16_t offset, length, cnt_words;
11548         int ret_code;
11549
11550         offset = eeprom->offset >> 1;
11551         length = eeprom->length >> 1;
11552         cnt_words = length;
11553
11554         if (offset > hw->nvm.sr_size ||
11555                 offset + length > hw->nvm.sr_size) {
11556                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11557                 return -EINVAL;
11558         }
11559
11560         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11561
11562         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11563         if (ret_code != I40E_SUCCESS || cnt_words != length) {
11564                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
11565                 return -EIO;
11566         }
11567
11568         return 0;
11569 }
11570
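/* Identify the plugged module (SFP, QSFP+ or QSFP28) and report the
 * matching SFF standard and EEPROM length.
 */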
11571 static int i40e_get_module_info(struct rte_eth_dev *dev,
11572                                 struct rte_eth_dev_module_info *modinfo)
11573 {
11574         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11575         uint32_t sff8472_comp = 0;
11576         uint32_t sff8472_swap = 0;
11577         uint32_t sff8636_rev = 0;
11578         i40e_status status;
11579         uint32_t type = 0;
11580
11581         /* Check if firmware supports reading module EEPROM. */
11582         if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11583                 PMD_DRV_LOG(ERR,
11584                             "Module EEPROM memory read not supported. "
11585                             "Please update the NVM image.");
11586                 return -EINVAL;
11587         }
11588
11589         status = i40e_update_link_info(hw);
11590         if (status)
11591                 return -EIO;
11592
11593         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11594                 PMD_DRV_LOG(ERR,
11595                             "Cannot read module EEPROM memory. "
11596                             "No module connected.");
11597                 return -EINVAL;
11598         }
11599
11600         type = hw->phy.link_info.module_type[0];
11601
11602         switch (type) {
11603         case I40E_MODULE_TYPE_SFP:
11604                 status = i40e_aq_get_phy_register(hw,
11605                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11606                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
11607                                 I40E_MODULE_SFF_8472_COMP,
11608                                 &sff8472_comp, NULL);
11609                 if (status)
11610                         return -EIO;
11611
11612                 status = i40e_aq_get_phy_register(hw,
11613                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11614                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
11615                                 I40E_MODULE_SFF_8472_SWAP,
11616                                 &sff8472_swap, NULL);
11617                 if (status)
11618                         return -EIO;
11619
11620                 /* Check if the module requires address swap to access
11621                  * the other EEPROM memory page.
11622                  */
11623                 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11624                         PMD_DRV_LOG(WARNING,
11625                                     "Module address swap to access "
11626                                     "page 0xA2 is not supported.");
11627                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11628                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11629                 } else if (sff8472_comp == 0x00) {
11630                         /* Module is not SFF-8472 compliant */
11631                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11632                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11633                 } else {
11634                         modinfo->type = RTE_ETH_MODULE_SFF_8472;
11635                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11636                 }
11637                 break;
11638         case I40E_MODULE_TYPE_QSFP_PLUS:
11639                 /* Read from memory page 0. */
11640                 status = i40e_aq_get_phy_register(hw,
11641                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11642                                 0, 1,
11643                                 I40E_MODULE_REVISION_ADDR,
11644                                 &sff8636_rev, NULL);
11645                 if (status)
11646                         return -EIO;
11647                 /* Determine revision compliance byte */
11648                 if (sff8636_rev > 0x02) {
11649                         /* Module is SFF-8636 compliant */
11650                         modinfo->type = RTE_ETH_MODULE_SFF_8636;
11651                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11652                 } else {
11653                         modinfo->type = RTE_ETH_MODULE_SFF_8436;
11654                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11655                 }
11656                 break;
11657         case I40E_MODULE_TYPE_QSFP28:
11658                 modinfo->type = RTE_ETH_MODULE_SFF_8636;
11659                 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11660                 break;
11661         default:
11662                 PMD_DRV_LOG(ERR, "Module type unrecognized");
11663                 return -EINVAL;
11664         }
11665         return 0;
11666 }
11667
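/* Read the module EEPROM byte by byte over the AQ PHY register
 * interface, translating the flat offset into I2C address and page.
 */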
11668 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11669                                   struct rte_dev_eeprom_info *info)
11670 {
11671         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11672         bool is_sfp = false;
11673         i40e_status status;
11674         uint8_t *data;
11675         uint32_t value = 0;
11676         uint32_t i;
11677
11678         if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11679                 is_sfp = true;
11680
11681         data = info->data;
11682         for (i = 0; i < info->length; i++) {
11683                 u32 offset = i + info->offset;
11684                 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11685
11686                 /* Check if we need to access the other memory page */
11687                 if (is_sfp) {
11688                         if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11689                                 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11690                                 addr = I40E_I2C_EEPROM_DEV_ADDR2;
11691                         }
11692                 } else {
11693                         while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11694                                 /* Compute memory page number and offset. */
11695                                 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11696                                 addr++;
11697                         }
11698                 }
11699                 status = i40e_aq_get_phy_register(hw,
11700                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11701                                 addr, 1, offset, &value, NULL);
11702                 if (status)
11703                         return -EIO;
11704                 data[i] = (uint8_t)value;
11705         }
11706         return 0;
11707 }
11708
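/* Change the default MAC address: swap the MAC filter on the main VSI
 * first, then program the new address into firmware.
 */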
11709 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11710                                      struct rte_ether_addr *mac_addr)
11711 {
11712         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11713         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11714         struct i40e_vsi *vsi = pf->main_vsi;
11715         struct i40e_mac_filter_info mac_filter;
11716         struct i40e_mac_filter *f;
11717         int ret;
11718
11719         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
11720                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11721                 return -EINVAL;
11722         }
11723
11724         TAILQ_FOREACH(f, &vsi->mac_list, next) {
11725                 if (rte_is_same_ether_addr(&pf->dev_addr,
11726                                                 &f->mac_info.mac_addr))
11727                         break;
11728         }
11729
11730         if (f == NULL) {
11731                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11732                 return -EIO;
11733         }
11734
11735         mac_filter = f->mac_info;
11736         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11737         if (ret != I40E_SUCCESS) {
11738                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11739                 return -EIO;
11740         }
11741         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11742         ret = i40e_vsi_add_mac(vsi, &mac_filter);
11743         if (ret != I40E_SUCCESS) {
11744                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
11745                 return -EIO;
11746         }
11747         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11748
11749         ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11750                                         mac_addr->addr_bytes, NULL);
11751         if (ret != I40E_SUCCESS) {
11752                 PMD_DRV_LOG(ERR, "Failed to change mac");
11753                 return -EIO;
11754         }
11755
11756         return 0;
11757 }
11758
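/* Set a new MTU. Only allowed while the port is stopped; the value is
 * applied through the max Rx packet length.
 */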
11759 static int
11760 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11761 {
11762         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11763         struct rte_eth_dev_data *dev_data = pf->dev_data;
11764         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11765         int ret = 0;
11766
11767         /* check if mtu is within the allowed range */
11768         if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
11769                 return -EINVAL;
11770
11771         /* Changing the MTU is forbidden while the port is started */
11772         if (dev_data->dev_started) {
11773                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11774                             dev_data->port_id);
11775                 return -EBUSY;
11776         }
11777
11778         if (frame_size > I40E_ETH_MAX_LEN)
11779                 dev_data->dev_conf.rxmode.offloads |=
11780                         DEV_RX_OFFLOAD_JUMBO_FRAME;
11781         else
11782                 dev_data->dev_conf.rxmode.offloads &=
11783                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
11784
11785         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11786
11787         return ret;
11788 }
11789
11790 /* Restore ethertype filter */
11791 static void
11792 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11793 {
11794         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11795         struct i40e_ethertype_filter_list
11796                 *ethertype_list = &pf->ethertype.ethertype_list;
11797         struct i40e_ethertype_filter *f;
11798         struct i40e_control_filter_stats stats = {0};
11799         uint16_t flags;
11800
11801         TAILQ_FOREACH(f, ethertype_list, rules) {
11802                 flags = 0;
11803                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11804                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11805                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11806                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11807                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11808
11809                 memset(&stats, 0, sizeof(stats));
11810                 i40e_aq_add_rem_control_packet_filter(hw,
11811                                             f->input.mac_addr.addr_bytes,
11812                                             f->input.ether_type,
11813                                             flags, pf->main_vsi->seid,
11814                                             f->queue, 1, &stats, NULL);
11815         }
11816         PMD_DRV_LOG(INFO, "Ethertype filter:"
11817                     " mac_etype_used = %u, etype_used = %u,"
11818                     " mac_etype_free = %u, etype_free = %u",
11819                     stats.mac_etype_used, stats.etype_used,
11820                     stats.mac_etype_free, stats.etype_free);
11821 }
11822
11823 /* Restore tunnel filter */
11824 static void
11825 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11826 {
11827         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11828         struct i40e_vsi *vsi;
11829         struct i40e_pf_vf *vf;
11830         struct i40e_tunnel_filter_list
11831                 *tunnel_list = &pf->tunnel.tunnel_list;
11832         struct i40e_tunnel_filter *f;
11833         struct i40e_aqc_cloud_filters_element_bb cld_filter;
11834         bool big_buffer = false;
11835
11836         TAILQ_FOREACH(f, tunnel_list, rules) {
                /* Choose the AQ variant per filter; do not carry the
                 * big-buffer flag over from a previous iteration.
                 */
                big_buffer = false;
11837                 if (!f->is_to_vf)
11838                         vsi = pf->main_vsi;
11839                 else {
11840                         vf = &pf->vfs[f->vf_id];
11841                         vsi = vf->vsi;
11842                 }
11843                 memset(&cld_filter, 0, sizeof(cld_filter));
11844                 rte_ether_addr_copy((struct rte_ether_addr *)
11845                                 &f->input.outer_mac,
11846                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
11847                 rte_ether_addr_copy((struct rte_ether_addr *)
11848                                 &f->input.inner_mac,
11849                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
11850                 cld_filter.element.inner_vlan = f->input.inner_vlan;
11851                 cld_filter.element.flags = f->input.flags;
11852                 cld_filter.element.tenant_id = f->input.tenant_id;
11853                 cld_filter.element.queue_number = f->queue;
11854                 rte_memcpy(cld_filter.general_fields,
11855                            f->input.general_fields,
11856                            sizeof(f->input.general_fields));
11857
11858                 if (((f->input.flags &
11859                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11860                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11861                     ((f->input.flags &
11862                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11863                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11864                     ((f->input.flags &
11865                      I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11866                      I40E_AQC_ADD_CLOUD_FILTER_0X10))
11867                         big_buffer = 1;
11868
11869                 if (big_buffer)
11870                         i40e_aq_add_cloud_filters_bb(hw,
11871                                         vsi->seid, &cld_filter, 1);
11872                 else
11873                         i40e_aq_add_cloud_filters(hw, vsi->seid,
11874                                                   &cld_filter.element, 1);
11875         }
11876 }
11877
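/* Re-program all software-tracked filters into hardware, e.g. when
 * the port is (re)started.
 */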
11878 static void
11879 i40e_filter_restore(struct i40e_pf *pf)
11880 {
11881         i40e_ethertype_filter_restore(pf);
11882         i40e_tunnel_filter_restore(pf);
11883         i40e_fdir_filter_restore(pf);
11884         (void)i40e_hash_filter_restore(pf);
11885 }
11886
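/* Check whether an ethdev is bound to the given PCI driver. */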
11887 bool
11888 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11889 {
11890         if (strcmp(dev->device->driver->name, drv->driver.name))
11891                 return false;
11892
11893         return true;
11894 }
11895
11896 bool
11897 is_i40e_supported(struct rte_eth_dev *dev)
11898 {
11899         return is_device_supported(dev, &rte_i40e_pmd);
11900 }
11901
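/* Look up a customized pctype entry by its index. */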
11902 struct i40e_customized_pctype *
11903 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11904 {
11905         int i;
11906
11907         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11908                 if (pf->customized_pctype[i].index == index)
11909                         return &pf->customized_pctype[i];
11910         }
11911         return NULL;
11912 }
11913
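/* Sync the customized pctype table with the pctypes carried in a DDP
 * package that is being added or removed.
 */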
11914 static int
11915 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11916                               uint32_t pkg_size, uint32_t proto_num,
11917                               struct rte_pmd_i40e_proto_info *proto,
11918                               enum rte_pmd_i40e_package_op op)
11919 {
11920         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11921         uint32_t pctype_num;
11922         struct rte_pmd_i40e_ptype_info *pctype;
11923         uint32_t buff_size;
11924         struct i40e_customized_pctype *new_pctype = NULL;
11925         uint8_t proto_id;
11926         uint8_t pctype_value;
11927         char name[64];
11928         uint32_t i, j, n;
11929         int ret;
11930
11931         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11932             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11933                 PMD_DRV_LOG(ERR, "Unsupported operation.");
11934                 return -1;
11935         }
11936
11937         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11938                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
11939                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11940         if (ret) {
11941                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
11942                 return -1;
11943         }
11944         if (!pctype_num) {
11945                 PMD_DRV_LOG(INFO, "No new pctype added");
11946                 return -1;
11947         }
11948
11949         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11950         pctype = rte_zmalloc("new_pctype", buff_size, 0);
11951         if (!pctype) {
11952                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11953                 return -1;
11954         }
11955         /* get information about new pctype list */
11956         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11957                                         (uint8_t *)pctype, buff_size,
11958                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11959         if (ret) {
11960                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
11961                 rte_free(pctype);
11962                 return -1;
11963         }
11964
11965         /* Update customized pctype. */
11966         for (i = 0; i < pctype_num; i++) {
                /* Reset so that an unmatched name cannot reuse the
                 * pctype found in a previous iteration.
                 */
                new_pctype = NULL;
11967                 pctype_value = pctype[i].ptype_id;
11968                 memset(name, 0, sizeof(name));
11969                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11970                         proto_id = pctype[i].protocols[j];
11971                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11972                                 continue;
11973                         for (n = 0; n < proto_num; n++) {
11974                                 if (proto[n].proto_id != proto_id)
11975                                         continue;
11976                                 strlcat(name, proto[n].name, sizeof(name));
11977                                 strlcat(name, "_", sizeof(name));
11978                                 break;
11979                         }
11980                 }
                /* Strip the trailing '_' appended above; guard against
                 * an empty name.
                 */
                if (name[0] != '\0')
                        name[strlen(name) - 1] = '\0';
11982                 PMD_DRV_LOG(INFO, "name = %s", name);
11983                 if (!strcmp(name, "GTPC"))
11984                         new_pctype =
11985                                 i40e_find_customized_pctype(pf,
11986                                                       I40E_CUSTOMIZED_GTPC);
11987                 else if (!strcmp(name, "GTPU_IPV4"))
11988                         new_pctype =
11989                                 i40e_find_customized_pctype(pf,
11990                                                    I40E_CUSTOMIZED_GTPU_IPV4);
11991                 else if (!strcmp(name, "GTPU_IPV6"))
11992                         new_pctype =
11993                                 i40e_find_customized_pctype(pf,
11994                                                    I40E_CUSTOMIZED_GTPU_IPV6);
11995                 else if (!strcmp(name, "GTPU"))
11996                         new_pctype =
11997                                 i40e_find_customized_pctype(pf,
11998                                                       I40E_CUSTOMIZED_GTPU);
11999                 else if (!strcmp(name, "IPV4_L2TPV3"))
12000                         new_pctype =
12001                                 i40e_find_customized_pctype(pf,
12002                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
12003                 else if (!strcmp(name, "IPV6_L2TPV3"))
12004                         new_pctype =
12005                                 i40e_find_customized_pctype(pf,
12006                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
12007                 else if (!strcmp(name, "IPV4_ESP"))
12008                         new_pctype =
12009                                 i40e_find_customized_pctype(pf,
12010                                                 I40E_CUSTOMIZED_ESP_IPV4);
12011                 else if (!strcmp(name, "IPV6_ESP"))
12012                         new_pctype =
12013                                 i40e_find_customized_pctype(pf,
12014                                                 I40E_CUSTOMIZED_ESP_IPV6);
12015                 else if (!strcmp(name, "IPV4_UDP_ESP"))
12016                         new_pctype =
12017                                 i40e_find_customized_pctype(pf,
12018                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
12019                 else if (!strcmp(name, "IPV6_UDP_ESP"))
12020                         new_pctype =
12021                                 i40e_find_customized_pctype(pf,
12022                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
12023                 else if (!strcmp(name, "IPV4_AH"))
12024                         new_pctype =
12025                                 i40e_find_customized_pctype(pf,
12026                                                 I40E_CUSTOMIZED_AH_IPV4);
12027                 else if (!strcmp(name, "IPV6_AH"))
12028                         new_pctype =
12029                                 i40e_find_customized_pctype(pf,
12030                                                 I40E_CUSTOMIZED_AH_IPV6);
12031                 if (new_pctype) {
12032                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
12033                                 new_pctype->pctype = pctype_value;
12034                                 new_pctype->valid = true;
12035                         } else {
12036                                 new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
12037                                 new_pctype->valid = false;
12038                         }
12039                 }
12040         }
12041
12042         rte_free(pctype);
12043         return 0;
12044 }
12045
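/* Rebuild the HW-to-SW ptype mapping from the ptypes carried in a DDP
 * package; on package removal the mapping is simply reset.
 */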
12046 static int
12047 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
12048                              uint32_t pkg_size, uint32_t proto_num,
12049                              struct rte_pmd_i40e_proto_info *proto,
12050                              enum rte_pmd_i40e_package_op op)
12051 {
12052         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
12053         uint16_t port_id = dev->data->port_id;
12054         uint32_t ptype_num;
12055         struct rte_pmd_i40e_ptype_info *ptype;
12056         uint32_t buff_size;
12057         uint8_t proto_id;
12058         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
12059         uint32_t i, j, n;
12060         bool in_tunnel;
12061         int ret;
12062
12063         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12064             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12065                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12066                 return -1;
12067         }
12068
12069         if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
12070                 rte_pmd_i40e_ptype_mapping_reset(port_id);
12071                 return 0;
12072         }
12073
12074         /* get information about new ptype num */
12075         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12076                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
12077                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
12078         if (ret) {
12079                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
12080                 return ret;
12081         }
12082         if (!ptype_num) {
12083                 PMD_DRV_LOG(INFO, "No new ptype added");
12084                 return -1;
12085         }
12086
12087         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
12088         ptype = rte_zmalloc("new_ptype", buff_size, 0);
12089         if (!ptype) {
12090                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12091                 return -1;
12092         }
12093
12094         /* get information about new ptype list */
12095         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12096                                         (uint8_t *)ptype, buff_size,
12097                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
12098         if (ret) {
12099                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
12100                 rte_free(ptype);
12101                 return ret;
12102         }
12103
12104         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
12105         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
12106         if (!ptype_mapping) {
12107                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12108                 rte_free(ptype);
12109                 return -1;
12110         }
12111
12112         /* Update ptype mapping table. */
12113         for (i = 0; i < ptype_num; i++) {
12114                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
12115                 ptype_mapping[i].sw_ptype = 0;
12116                 in_tunnel = false;
12117                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12118                         proto_id = ptype[i].protocols[j];
12119                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12120                                 continue;
12121                         for (n = 0; n < proto_num; n++) {
12122                                 if (proto[n].proto_id != proto_id)
12123                                         continue;
12124                                 memset(name, 0, sizeof(name));
12125                                 strlcpy(name, proto[n].name, sizeof(name));
12126                                 PMD_DRV_LOG(INFO, "name = %s", name);
12127                                 if (!strncasecmp(name, "PPPOE", 5))
12128                                         ptype_mapping[i].sw_ptype |=
12129                                                 RTE_PTYPE_L2_ETHER_PPPOE;
12130                                 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12131                                          !in_tunnel) {
12132                                         ptype_mapping[i].sw_ptype |=
12133                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12134                                         ptype_mapping[i].sw_ptype |=
12135                                                 RTE_PTYPE_L4_FRAG;
12136                                 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12137                                            in_tunnel) {
12138                                         ptype_mapping[i].sw_ptype |=
12139                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12140                                         ptype_mapping[i].sw_ptype |=
12141                                                 RTE_PTYPE_INNER_L4_FRAG;
12142                                 } else if (!strncasecmp(name, "OIPV4", 5)) {
12143                                         ptype_mapping[i].sw_ptype |=
12144                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12145                                         in_tunnel = true;
12146                                 } else if (!strncasecmp(name, "IPV4", 4) &&
12147                                            !in_tunnel)
12148                                         ptype_mapping[i].sw_ptype |=
12149                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12150                                 else if (!strncasecmp(name, "IPV4", 4) &&
12151                                          in_tunnel)
12152                                         ptype_mapping[i].sw_ptype |=
12153                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12154                                 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12155                                          !in_tunnel) {
12156                                         ptype_mapping[i].sw_ptype |=
12157                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12158                                         ptype_mapping[i].sw_ptype |=
12159                                                 RTE_PTYPE_L4_FRAG;
12160                                 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12161                                            in_tunnel) {
12162                                         ptype_mapping[i].sw_ptype |=
12163                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12164                                         ptype_mapping[i].sw_ptype |=
12165                                                 RTE_PTYPE_INNER_L4_FRAG;
12166                                 } else if (!strncasecmp(name, "OIPV6", 5)) {
12167                                         ptype_mapping[i].sw_ptype |=
12168                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12169                                         in_tunnel = true;
12170                                 } else if (!strncasecmp(name, "IPV6", 4) &&
12171                                            !in_tunnel)
12172                                         ptype_mapping[i].sw_ptype |=
12173                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12174                                 else if (!strncasecmp(name, "IPV6", 4) &&
12175                                          in_tunnel)
12176                                         ptype_mapping[i].sw_ptype |=
12177                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12178                                 else if (!strncasecmp(name, "UDP", 3) &&
12179                                          !in_tunnel)
12180                                         ptype_mapping[i].sw_ptype |=
12181                                                 RTE_PTYPE_L4_UDP;
12182                                 else if (!strncasecmp(name, "UDP", 3) &&
12183                                          in_tunnel)
12184                                         ptype_mapping[i].sw_ptype |=
12185                                                 RTE_PTYPE_INNER_L4_UDP;
12186                                 else if (!strncasecmp(name, "TCP", 3) &&
12187                                          !in_tunnel)
12188                                         ptype_mapping[i].sw_ptype |=
12189                                                 RTE_PTYPE_L4_TCP;
12190                                 else if (!strncasecmp(name, "TCP", 3) &&
12191                                          in_tunnel)
12192                                         ptype_mapping[i].sw_ptype |=
12193                                                 RTE_PTYPE_INNER_L4_TCP;
12194                                 else if (!strncasecmp(name, "SCTP", 4) &&
12195                                          !in_tunnel)
12196                                         ptype_mapping[i].sw_ptype |=
12197                                                 RTE_PTYPE_L4_SCTP;
12198                                 else if (!strncasecmp(name, "SCTP", 4) &&
12199                                          in_tunnel)
12200                                         ptype_mapping[i].sw_ptype |=
12201                                                 RTE_PTYPE_INNER_L4_SCTP;
12202                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12203                                           !strncasecmp(name, "ICMPV6", 6)) &&
12204                                          !in_tunnel)
12205                                         ptype_mapping[i].sw_ptype |=
12206                                                 RTE_PTYPE_L4_ICMP;
12207                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12208                                           !strncasecmp(name, "ICMPV6", 6)) &&
12209                                          in_tunnel)
12210                                         ptype_mapping[i].sw_ptype |=
12211                                                 RTE_PTYPE_INNER_L4_ICMP;
12212                                 else if (!strncasecmp(name, "GTPC", 4)) {
12213                                         ptype_mapping[i].sw_ptype |=
12214                                                 RTE_PTYPE_TUNNEL_GTPC;
12215                                         in_tunnel = true;
12216                                 } else if (!strncasecmp(name, "GTPU", 4)) {
12217                                         ptype_mapping[i].sw_ptype |=
12218                                                 RTE_PTYPE_TUNNEL_GTPU;
12219                                         in_tunnel = true;
12220                                 } else if (!strncasecmp(name, "ESP", 3)) {
12221                                         ptype_mapping[i].sw_ptype |=
12222                                                 RTE_PTYPE_TUNNEL_ESP;
12223                                         in_tunnel = true;
12224                                 } else if (!strncasecmp(name, "GRENAT", 6)) {
12225                                         ptype_mapping[i].sw_ptype |=
12226                                                 RTE_PTYPE_TUNNEL_GRENAT;
12227                                         in_tunnel = true;
12228                                 } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12229                                            !strncasecmp(name, "L2TPV2", 6) ||
12230                                            !strncasecmp(name, "L2TPV3", 6)) {
12231                                         ptype_mapping[i].sw_ptype |=
12232                                                 RTE_PTYPE_TUNNEL_L2TP;
12233                                         in_tunnel = true;
12234                                 }
12235
12236                                 break;
12237                         }
12238                 }
12239         }
12240
12241         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
12242                                                 ptype_num, 0);
12243         if (ret)
12244                 PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
12245
12246         rte_free(ptype_mapping);
12247         rte_free(ptype);
12248         return ret;
12249 }
12250
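/* Parse the protocol list of a DDP package and refresh the GTP/ESP
 * support flags as well as the customized pctype and ptype tables.
 */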
12251 void
12252 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
12253                             uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
12254 {
12255         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12256         uint32_t proto_num;
12257         struct rte_pmd_i40e_proto_info *proto;
12258         uint32_t buff_size;
12259         uint32_t i;
12260         int ret;
12261
12262         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12263             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12264                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12265                 return;
12266         }
12267
12268         /* get information about protocol number */
12269         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12270                                        (uint8_t *)&proto_num, sizeof(proto_num),
12271                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
12272         if (ret) {
12273                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
12274                 return;
12275         }
12276         if (!proto_num) {
12277                 PMD_DRV_LOG(INFO, "No new protocol added");
12278                 return;
12279         }
12280
12281         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
12282         proto = rte_zmalloc("new_proto", buff_size, 0);
12283         if (!proto) {
12284                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12285                 return;
12286         }
12287
12288         /* get information about protocol list */
12289         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12290                                         (uint8_t *)proto, buff_size,
12291                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
12292         if (ret) {
12293                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
12294                 rte_free(proto);
12295                 return;
12296         }
12297
12298         /* Check if GTP is supported. */
12299         for (i = 0; i < proto_num; i++) {
12300                 if (!strncmp(proto[i].name, "GTP", 3)) {
12301                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12302                                 pf->gtp_support = true;
12303                         else
12304                                 pf->gtp_support = false;
12305                         break;
12306                 }
12307         }
12308
12309         /* Check if ESP is supported. */
12310         for (i = 0; i < proto_num; i++) {
12311                 if (!strncmp(proto[i].name, "ESP", 3)) {
12312                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12313                                 pf->esp_support = true;
12314                         else
12315                                 pf->esp_support = false;
12316                         break;
12317                 }
12318         }
12319
12320         /* Update customized pctype info */
12321         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12322                                             proto_num, proto, op);
12323         if (ret)
12324                 PMD_DRV_LOG(INFO, "No pctype is updated.");
12325
12326         /* Update customized ptype info */
12327         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12328                                            proto_num, proto, op);
12329         if (ret)
12330                 PMD_DRV_LOG(INFO, "No ptype is updated.");
12331
12332         rte_free(proto);
12333 }
12334
12335 /* Create a QinQ cloud filter
12336  *
12337  * The Fortville NIC has limited resources for tunnel filters,
12338  * so we can only reuse existing filters.
12339  *
12340  * In step 1 we define which Field Vector fields can be used for
12341  * filter types.
12342  * As we do not have the inner tag defined as a field,
12343  * we have to define it first by reusing one of the L1 entries.
12344  *
12345  * In step 2 we replace one of the existing filter types with
12346  * a new one for QinQ.
12347  * As we are reusing an L1 entry and replacing an L2 entry, some default
12348  * filter types disappear; which ones depends on the entries we reuse.
12349  *
12350  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
12351  *
12352  * 1.   Create L1 filter of outer vlan (12b) which will be in use
12353  *              later when we define the cloud filter.
12354  *      a.      Valid_flags.replace_cloud = 0
12355  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
12356  *      c.      New_filter = 0x10
12357  *      d.      TR bit = 0xff (optional, not used here)
12358  *      e.      Buffer – 2 entries:
12359  *              i.      Byte 0 = 8 (outer vlan FV index).
12360  *                      Byte 1 = 0 (rsv)
12361  *                      Byte 2-3 = 0x0fff
12362  *              ii.     Byte 0 = 37 (inner vlan FV index).
12363  *                      Byte 1 = 0 (rsv)
12364  *                      Byte 2-3 = 0x0fff
12365  *
12366  * Step 2:
12367  * 2.   Create cloud filter using two L1 filters entries: stag and
12368  *              new filter(outer vlan+ inner vlan)
12369  *      a.      Valid_flags.replace_cloud = 1
12370  *      b.      Old_filter = 1 (instead of outer IP)
12371  *      c.      New_filter = 0x10
12372  *      d.      Buffer – 2 entries:
12373  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
12374  *                      Byte 1-3 = 0 (rsv)
12375  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
12376  *                      Byte 9-11 = 0 (rsv)
12377  */
12378 static int
12379 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
12380 {
12381         int ret = -ENOTSUP;
12382         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
12383         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
12384         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12385         struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
12386
12387         if (pf->support_multi_driver) {
12388                 PMD_DRV_LOG(ERR, "Replacing cloud filters is not supported.");
12389                 return ret;
12390         }
12391
12392         /* Init */
12393         memset(&filter_replace, 0,
12394                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12395         memset(&filter_replace_buf, 0,
12396                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12397
12398         /* create L1 filter */
12399         filter_replace.old_filter_type =
12400                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
12401         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12402         filter_replace.tr_bit = 0;
12403
12404         /* Prepare the buffer, 2 entries */
12405         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
12406         filter_replace_buf.data[0] |=
12407                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12408         /* Field Vector 12b mask */
12409         filter_replace_buf.data[2] = 0xff;
12410         filter_replace_buf.data[3] = 0x0f;
12411         filter_replace_buf.data[4] =
12412                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
12413         filter_replace_buf.data[4] |=
12414                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12415         /* Field Vector 12b mask */
12416         filter_replace_buf.data[6] = 0xff;
12417         filter_replace_buf.data[7] = 0x0f;
12418         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12419                         &filter_replace_buf);
12420         if (ret != I40E_SUCCESS)
12421                 return ret;
12422
12423         if (filter_replace.old_filter_type !=
12424             filter_replace.new_filter_type)
12425                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
12426                             " original: 0x%x, new: 0x%x",
12427                             dev->device->name,
12428                             filter_replace.old_filter_type,
12429                             filter_replace.new_filter_type);
12430
12431         /* Apply the second L2 cloud filter */
12432         memset(&filter_replace, 0,
12433                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12434         memset(&filter_replace_buf, 0,
12435                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12436
12437         /* Create the L2 cloud filter; its input is the L1 filter above */
12438         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
12439         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
12440         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12441
12442         /* Prepare the buffer, 2 entries */
12443         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
12444         filter_replace_buf.data[0] |=
12445                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12446         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12447         filter_replace_buf.data[4] |=
12448                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12449         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12450                         &filter_replace_buf);
12451         if (!ret && (filter_replace.old_filter_type !=
12452                      filter_replace.new_filter_type))
12453                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
12454                             " original: 0x%x, new: 0x%x",
12455                             dev->device->name,
12456                             filter_replace.old_filter_type,
12457                             filter_replace.new_filter_type);
12458
12459         return ret;
12460 }
12461
12462 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
12463 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
12464 #ifdef RTE_ETHDEV_DEBUG_RX
12465 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_rx, rx, DEBUG);
12466 #endif
12467 #ifdef RTE_ETHDEV_DEBUG_TX
12468 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_tx, tx, DEBUG);
12469 #endif
12470
12471 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12472                               ETH_I40E_FLOATING_VEB_ARG "=1"
12473                               ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
12474                               ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12475                               ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
                              ETH_I40E_VF_MSG_CFG "=<string>");