net/i40e: fix device startup resource release
[dpdk.git] / drivers / net / i40e / i40e_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <stdarg.h>
11 #include <inttypes.h>
12 #include <assert.h>
13
14 #include <rte_common.h>
15 #include <rte_eal.h>
16 #include <rte_string_fns.h>
17 #include <rte_pci.h>
18 #include <rte_bus_pci.h>
19 #include <rte_ether.h>
20 #include <ethdev_driver.h>
21 #include <ethdev_pci.h>
22 #include <rte_memzone.h>
23 #include <rte_malloc.h>
24 #include <rte_memcpy.h>
25 #include <rte_alarm.h>
26 #include <rte_dev.h>
27 #include <rte_tailq.h>
28 #include <rte_hash_crc.h>
29 #include <rte_bitmap.h>
30 #include <rte_os_shim.h>
31
32 #include "i40e_logs.h"
33 #include "base/i40e_prototype.h"
34 #include "base/i40e_adminq_cmd.h"
35 #include "base/i40e_type.h"
36 #include "base/i40e_register.h"
37 #include "base/i40e_dcb.h"
38 #include "i40e_ethdev.h"
39 #include "i40e_rxtx.h"
40 #include "i40e_pf.h"
41 #include "i40e_regs.h"
42 #include "rte_pmd_i40e.h"
43 #include "i40e_hash.h"
44
45 #define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
46 #define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"
47 #define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"
48 #define ETH_I40E_QUEUE_NUM_PER_VF_ARG   "queue-num-per-vf"
49 #define ETH_I40E_VF_MSG_CFG             "vf_msg_cfg"
50
51 #define I40E_CLEAR_PXE_WAIT_MS     200
52 #define I40E_VSI_TSR_QINQ_STRIP         0x4010
53 #define I40E_VSI_TSR(_i)        (0x00050800 + ((_i) * 4))
54
55 /* Maximum number of capability elements */
56 #define I40E_MAX_CAP_ELE_NUM       128
57
58 /* Wait count and interval */
59 #define I40E_CHK_Q_ENA_COUNT       1000
60 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
61
62 /* Maximum number of VSIs */
63 #define I40E_MAX_NUM_VSIS          (384UL)
64
65 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
66
67 /* Flow control default timer */
68 #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
69
70 /* Flow control enable fwd bit */
71 #define I40E_PRTMAC_FWD_CTRL   0x00000001
72
73 /* Receive Packet Buffer size */
74 #define I40E_RXPBSIZE (968 * 1024)
75
76 /* Kilobytes shift */
77 #define I40E_KILOSHIFT 10
78
79 /* Flow control default high water */
80 #define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
81
82 /* Flow control default low water */
83 #define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
84
85 /* Receive average packet size in bytes */
86 #define I40E_PACKET_AVERAGE_SIZE 128
87
88 /* Mask of PF interrupt causes */
89 #define I40E_PFINT_ICR0_ENA_MASK ( \
90                 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
91                 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
92                 I40E_PFINT_ICR0_ENA_GRST_MASK | \
93                 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
94                 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
95                 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
96                 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
97                 I40E_PFINT_ICR0_ENA_VFLR_MASK | \
98                 I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
99
100 #define I40E_FLOW_TYPES ( \
101         (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
102         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
103         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
104         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
105         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
106         (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
107         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
108         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
109         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
110         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
111         (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
112
113 /* Additional timesync values. */
114 #define I40E_PTP_40GB_INCVAL     0x0199999999ULL
115 #define I40E_PTP_10GB_INCVAL     0x0333333333ULL
116 #define I40E_PTP_1GB_INCVAL      0x2000000000ULL
117 #define I40E_PRTTSYN_TSYNENA     0x80000000
118 #define I40E_PRTTSYN_TSYNTYPE    0x0e000000
119 #define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
120
121 /**
122  * Below are values for writing un-exposed registers suggested
123  * by silicon experts
124  */
125 /* Destination MAC address */
126 #define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
127 /* Source MAC address */
128 #define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
129 /* Outer (S-Tag) VLAN tag in the outer L2 header */
130 #define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
131 /* Inner (C-Tag) or single VLAN tag in the outer L2 header */
132 #define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
133 /* Single VLAN tag in the inner L2 header */
134 #define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
135 /* Source IPv4 address */
136 #define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
137 /* Destination IPv4 address */
138 #define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
139 /* Source IPv4 address for X722 */
140 #define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
141 /* Destination IPv4 address for X722 */
142 #define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
143 /* IPv4 Protocol for X722 */
144 #define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
145 /* IPv4 Time to Live for X722 */
146 #define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
147 /* IPv4 Type of Service (TOS) */
148 #define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
149 /* IPv4 Protocol */
150 #define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
151 /* IPv4 Time to Live */
152 #define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
153 /* Source IPv6 address */
154 #define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
155 /* Destination IPv6 address */
156 #define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
157 /* IPv6 Traffic Class (TC) */
158 #define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
159 /* IPv6 Next Header */
160 #define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
161 /* IPv6 Hop Limit */
162 #define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
163 /* Source L4 port */
164 #define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
165 /* Destination L4 port */
166 #define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
167 /* SCTP verification tag */
168 #define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
169 /* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE)*/
170 #define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
171 /* Source port of tunneling UDP */
172 #define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
173 /* Destination port of tunneling UDP */
174 #define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
175 /* UDP Tunneling ID, NVGRE/GRE key */
176 #define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
177 /* Last ether type */
178 #define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
179 /* Tunneling outer destination IPv4 address */
180 #define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
181 /* Tunneling outer destination IPv6 address */
182 #define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
183 /* 1st word of flex payload */
184 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
185 /* 2nd word of flex payload */
186 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
187 /* 3rd word of flex payload */
188 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
189 /* 4th word of flex payload */
190 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
191 /* 5th word of flex payload */
192 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
193 /* 6th word of flex payload */
194 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
195 /* 7th word of flex payload */
196 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
197 /* 8th word of flex payload */
198 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
199 /* all 8 words flex payload */
200 #define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
201 #define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL
202
203 #define I40E_TRANSLATE_INSET 0
204 #define I40E_TRANSLATE_REG   1
205
206 #define I40E_INSET_IPV4_TOS_MASK        0x0000FF00UL
207 #define I40E_INSET_IPV4_TTL_MASK        0x000000FFUL
208 #define I40E_INSET_IPV4_PROTO_MASK      0x0000FF00UL
209 #define I40E_INSET_IPV6_TC_MASK         0x0000F00FUL
210 #define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x0000FF00UL
211 #define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000000FFUL
212
213 /* PCI offset for querying capability */
214 #define PCI_DEV_CAP_REG            0xA4
215 /* PCI offset for enabling/disabling Extended Tag */
216 #define PCI_DEV_CTRL_REG           0xA8
217 /* Bit mask of Extended Tag capability */
218 #define PCI_DEV_CAP_EXT_TAG_MASK   0x20
219 /* Bit shift of Extended Tag enable/disable */
220 #define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
221 /* Bit mask of Extended Tag enable/disable */
222 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
223
224 #define I40E_GLQF_PIT_IPV4_START        2
225 #define I40E_GLQF_PIT_IPV4_COUNT        2
226 #define I40E_GLQF_PIT_IPV6_START        4
227 #define I40E_GLQF_PIT_IPV6_COUNT        2
228
229 #define I40E_GLQF_PIT_SOURCE_OFF_GET(a) \
230                                 (((a) & I40E_GLQF_PIT_SOURCE_OFF_MASK) >> \
231                                  I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
232
233 #define I40E_GLQF_PIT_DEST_OFF_GET(a) \
234                                 (((a) & I40E_GLQF_PIT_DEST_OFF_MASK) >> \
235                                  I40E_GLQF_PIT_DEST_OFF_SHIFT)
236
237 #define I40E_GLQF_PIT_FSIZE_GET(a)      (((a) & I40E_GLQF_PIT_FSIZE_MASK) >> \
238                                          I40E_GLQF_PIT_FSIZE_SHIFT)
239
240 #define I40E_GLQF_PIT_BUILD(off, mask)  (((off) << 16) | (mask))
241 #define I40E_FDIR_FIELD_OFFSET(a)       ((a) >> 1)
242
243 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
244 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
245 static int i40e_dev_configure(struct rte_eth_dev *dev);
246 static int i40e_dev_start(struct rte_eth_dev *dev);
247 static int i40e_dev_stop(struct rte_eth_dev *dev);
248 static int i40e_dev_close(struct rte_eth_dev *dev);
249 static int  i40e_dev_reset(struct rte_eth_dev *dev);
250 static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
251 static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
252 static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
253 static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
254 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
255 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
256 static int i40e_dev_stats_get(struct rte_eth_dev *dev,
257                                struct rte_eth_stats *stats);
258 static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
259                                struct rte_eth_xstat *xstats, unsigned n);
260 static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
261                                      struct rte_eth_xstat_name *xstats_names,
262                                      unsigned limit);
263 static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
264 static int i40e_fw_version_get(struct rte_eth_dev *dev,
265                                 char *fw_version, size_t fw_size);
266 static int i40e_dev_info_get(struct rte_eth_dev *dev,
267                              struct rte_eth_dev_info *dev_info);
268 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
269                                 uint16_t vlan_id,
270                                 int on);
271 static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
272                               enum rte_vlan_type vlan_type,
273                               uint16_t tpid);
274 static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
275 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
276                                       uint16_t queue,
277                                       int on);
278 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
279 static int i40e_dev_led_on(struct rte_eth_dev *dev);
280 static int i40e_dev_led_off(struct rte_eth_dev *dev);
281 static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
282                               struct rte_eth_fc_conf *fc_conf);
283 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
284                               struct rte_eth_fc_conf *fc_conf);
285 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
286                                        struct rte_eth_pfc_conf *pfc_conf);
287 static int i40e_macaddr_add(struct rte_eth_dev *dev,
288                             struct rte_ether_addr *mac_addr,
289                             uint32_t index,
290                             uint32_t pool);
291 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
292 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
293                                     struct rte_eth_rss_reta_entry64 *reta_conf,
294                                     uint16_t reta_size);
295 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
296                                    struct rte_eth_rss_reta_entry64 *reta_conf,
297                                    uint16_t reta_size);
298
299 static int i40e_get_cap(struct i40e_hw *hw);
300 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
301 static int i40e_pf_setup(struct i40e_pf *pf);
302 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
303 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
304 static int i40e_dcb_setup(struct rte_eth_dev *dev);
305 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
306                 bool offset_loaded, uint64_t *offset, uint64_t *stat);
307 static void i40e_stat_update_48(struct i40e_hw *hw,
308                                uint32_t hireg,
309                                uint32_t loreg,
310                                bool offset_loaded,
311                                uint64_t *offset,
312                                uint64_t *stat);
313 static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
314 static void i40e_dev_interrupt_handler(void *param);
315 static void i40e_dev_alarm_handler(void *param);
316 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
317                                 uint32_t base, uint32_t num);
318 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
319 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
320                         uint32_t base);
321 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
322                         uint16_t num);
323 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
324 static int i40e_veb_release(struct i40e_veb *veb);
325 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
326                                                 struct i40e_vsi *vsi);
327 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
328 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
329                                              struct i40e_macvlan_filter *mv_f,
330                                              int num,
331                                              uint16_t vlan);
332 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
333 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
334                                     struct rte_eth_rss_conf *rss_conf);
335 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
336                                       struct rte_eth_rss_conf *rss_conf);
337 static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
338                                         struct rte_eth_udp_tunnel *udp_tunnel);
339 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
340                                         struct rte_eth_udp_tunnel *udp_tunnel);
341 static void i40e_filter_input_set_init(struct i40e_pf *pf);
342 static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
343                                  const struct rte_flow_ops **ops);
344 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
345                                   struct rte_eth_dcb_info *dcb_info);
346 static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
347 static void i40e_configure_registers(struct i40e_hw *hw);
348 static void i40e_hw_init(struct rte_eth_dev *dev);
349 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
350 static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
351                                                      uint16_t seid,
352                                                      uint16_t rule_type,
353                                                      uint16_t *entries,
354                                                      uint16_t count,
355                                                      uint16_t rule_id);
356 static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
357                         struct rte_eth_mirror_conf *mirror_conf,
358                         uint8_t sw_id, uint8_t on);
359 static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
360
361 static int i40e_timesync_enable(struct rte_eth_dev *dev);
362 static int i40e_timesync_disable(struct rte_eth_dev *dev);
363 static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
364                                            struct timespec *timestamp,
365                                            uint32_t flags);
366 static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
367                                            struct timespec *timestamp);
368 static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
369
370 static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
371
372 static int i40e_timesync_read_time(struct rte_eth_dev *dev,
373                                    struct timespec *timestamp);
374 static int i40e_timesync_write_time(struct rte_eth_dev *dev,
375                                     const struct timespec *timestamp);
376
377 static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
378                                          uint16_t queue_id);
379 static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
380                                           uint16_t queue_id);
381
382 static int i40e_get_regs(struct rte_eth_dev *dev,
383                          struct rte_dev_reg_info *regs);
384
385 static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
386
387 static int i40e_get_eeprom(struct rte_eth_dev *dev,
388                            struct rte_dev_eeprom_info *eeprom);
389
390 static int i40e_get_module_info(struct rte_eth_dev *dev,
391                                 struct rte_eth_dev_module_info *modinfo);
392 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
393                                   struct rte_dev_eeprom_info *info);
394
395 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
396                                       struct rte_ether_addr *mac_addr);
397
398 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
399
400 static int i40e_ethertype_filter_convert(
401         const struct rte_eth_ethertype_filter *input,
402         struct i40e_ethertype_filter *filter);
403 static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
404                                    struct i40e_ethertype_filter *filter);
405
406 static int i40e_tunnel_filter_convert(
407         struct i40e_aqc_cloud_filters_element_bb *cld_filter,
408         struct i40e_tunnel_filter *tunnel_filter);
409 static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
410                                 struct i40e_tunnel_filter *tunnel_filter);
411 static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
412
413 static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
414 static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
415 static void i40e_filter_restore(struct i40e_pf *pf);
416 static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
417
/*
 * Devargs keys accepted by this PMD (the ETH_I40E_* strings defined above);
 * NULL-terminated so the devargs parser can iterate the list.
 */
418 static const char *const valid_keys[] = {
419         ETH_I40E_FLOATING_VEB_ARG,
420         ETH_I40E_FLOATING_VEB_LIST_ARG,
421         ETH_I40E_SUPPORT_MULTI_DRIVER,
422         ETH_I40E_QUEUE_NUM_PER_VF_ARG,
423         ETH_I40E_VF_MSG_CFG,
424         NULL};
425
/*
 * PCI vendor/device ID pairs this driver binds to (XL710/X710/XXV710 and
 * X722 family devices).  The all-zero entry terminates the table.
 */
426 static const struct rte_pci_id pci_id_i40e_map[] = {
427         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
428         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
429         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
430         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
431         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
432         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
433         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
434         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
435         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
436         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
437         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
438         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
439         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
440         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
441         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
442         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
443         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
444         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
445         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
446         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
447         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
448         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
449         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
450         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
451         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
452         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
453         { .vendor_id = 0, /* sentinel */ },
454 };
455
/*
 * ethdev callback table for the i40e PF driver: maps the generic
 * rte_eth_dev API entry points to this PMD's implementations.  Handlers
 * not assigned here are reported as unsupported by the ethdev layer.
 */
456 static const struct eth_dev_ops i40e_eth_dev_ops = {
457         .dev_configure                = i40e_dev_configure,
458         .dev_start                    = i40e_dev_start,
459         .dev_stop                     = i40e_dev_stop,
460         .dev_close                    = i40e_dev_close,
461         .dev_reset                    = i40e_dev_reset,
462         .promiscuous_enable           = i40e_dev_promiscuous_enable,
463         .promiscuous_disable          = i40e_dev_promiscuous_disable,
464         .allmulticast_enable          = i40e_dev_allmulticast_enable,
465         .allmulticast_disable         = i40e_dev_allmulticast_disable,
466         .dev_set_link_up              = i40e_dev_set_link_up,
467         .dev_set_link_down            = i40e_dev_set_link_down,
468         .link_update                  = i40e_dev_link_update,
469         .stats_get                    = i40e_dev_stats_get,
470         .xstats_get                   = i40e_dev_xstats_get,
471         .xstats_get_names             = i40e_dev_xstats_get_names,
472         .stats_reset                  = i40e_dev_stats_reset,
473         /* xstats share the basic-stats reset handler */
474         .xstats_reset                 = i40e_dev_stats_reset,
475         .fw_version_get               = i40e_fw_version_get,
476         .dev_infos_get                = i40e_dev_info_get,
477         .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
478         .vlan_filter_set              = i40e_vlan_filter_set,
479         .vlan_tpid_set                = i40e_vlan_tpid_set,
480         .vlan_offload_set             = i40e_vlan_offload_set,
481         .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
482         .vlan_pvid_set                = i40e_vlan_pvid_set,
483         .rx_queue_start               = i40e_dev_rx_queue_start,
484         .rx_queue_stop                = i40e_dev_rx_queue_stop,
485         .tx_queue_start               = i40e_dev_tx_queue_start,
486         .tx_queue_stop                = i40e_dev_tx_queue_stop,
487         .rx_queue_setup               = i40e_dev_rx_queue_setup,
488         .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
489         .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
490         .rx_queue_release             = i40e_dev_rx_queue_release,
491         .tx_queue_setup               = i40e_dev_tx_queue_setup,
492         .tx_queue_release             = i40e_dev_tx_queue_release,
493         .dev_led_on                   = i40e_dev_led_on,
494         .dev_led_off                  = i40e_dev_led_off,
495         .flow_ctrl_get                = i40e_flow_ctrl_get,
496         .flow_ctrl_set                = i40e_flow_ctrl_set,
497         .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
498         .mac_addr_add                 = i40e_macaddr_add,
499         .mac_addr_remove              = i40e_macaddr_remove,
500         .reta_update                  = i40e_dev_rss_reta_update,
501         .reta_query                   = i40e_dev_rss_reta_query,
502         .rss_hash_update              = i40e_dev_rss_hash_update,
503         .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
504         .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
505         .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
506         .flow_ops_get                 = i40e_dev_flow_ops_get,
507         .rxq_info_get                 = i40e_rxq_info_get,
508         .txq_info_get                 = i40e_txq_info_get,
509         .rx_burst_mode_get            = i40e_rx_burst_mode_get,
510         .tx_burst_mode_get            = i40e_tx_burst_mode_get,
511         .mirror_rule_set              = i40e_mirror_rule_set,
512         .mirror_rule_reset            = i40e_mirror_rule_reset,
513         .timesync_enable              = i40e_timesync_enable,
514         .timesync_disable             = i40e_timesync_disable,
515         .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
516         .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
517         .get_dcb_info                 = i40e_dev_get_dcb_info,
518         .timesync_adjust_time         = i40e_timesync_adjust_time,
519         .timesync_read_time           = i40e_timesync_read_time,
520         .timesync_write_time          = i40e_timesync_write_time,
521         .get_reg                      = i40e_get_regs,
522         .get_eeprom_length            = i40e_get_eeprom_length,
523         .get_eeprom                   = i40e_get_eeprom,
524         .get_module_info              = i40e_get_module_info,
525         .get_module_eeprom            = i40e_get_module_eeprom,
526         .mac_addr_set                 = i40e_set_default_mac_addr,
527         .mtu_set                      = i40e_dev_mtu_set,
528         .tm_ops_get                   = i40e_tm_ops_get,
529         .tx_done_cleanup              = i40e_tx_done_cleanup,
530         .get_monitor_addr             = i40e_get_monitor_addr,
531 };
531
532 /* store statistics names and its offset in stats structure */
533 struct rte_i40e_xstats_name_off {
534         char name[RTE_ETH_XSTATS_NAME_SIZE]; /* xstat name reported to the application */
535         unsigned offset; /* byte offset of the counter within the stats structure */
536 };
537
/*
 * Basic per-VSI xstats: display name -> byte offset of the counter
 * inside struct i40e_eth_stats.
 */
538 static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
539         {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
540         {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
541         {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
542         {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
543         {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
544                 rx_unknown_protocol)},
545         {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
546         {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
547         {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
548         {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
549 };
550
/* Number of entries in rte_i40e_stats_strings[] */
551 #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
552                 sizeof(rte_i40e_stats_strings[0]))
553
/*
 * Port-level (MAC) xstats: display name -> byte offset of the counter
 * inside struct i40e_hw_port_stats.
 */
554 static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
555         {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
556                 tx_dropped_link_down)},
557         {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
558         {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
559                 illegal_bytes)},
560         {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
561         {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
562                 mac_local_faults)},
563         {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
564                 mac_remote_faults)},
565         {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
566                 rx_length_errors)},
567         {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
568         {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
569         {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
570         {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
571         {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
572         {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
573                 rx_size_127)},
574         {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
575                 rx_size_255)},
576         {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
577                 rx_size_511)},
578         {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
579                 rx_size_1023)},
580         {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
581                 rx_size_1522)},
582         {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
583                 rx_size_big)},
584         {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
585                 rx_undersize)},
586         {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
587                 rx_oversize)},
588         {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
589                 mac_short_packet_dropped)},
590         {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
591                 rx_fragments)},
592         {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
593         {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
594         {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
595                 tx_size_127)},
596         {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
597                 tx_size_255)},
598         {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
599                 tx_size_511)},
600         {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
601                 tx_size_1023)},
602         {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
603                 tx_size_1522)},
604         {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
605                 tx_size_big)},
606         {"rx_flow_director_atr_match_packets",
607                 offsetof(struct i40e_hw_port_stats, fd_atr_match)},
608         {"rx_flow_director_sb_match_packets",
609                 offsetof(struct i40e_hw_port_stats, fd_sb_match)},
610         {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
611                 tx_lpi_status)},
612         {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
613                 rx_lpi_status)},
614         {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
615                 tx_lpi_count)},
616         {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
617                 rx_lpi_count)},
618 };
619
/* Number of entries in rte_i40e_hw_port_strings[] */
620 #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
621                 sizeof(rte_i40e_hw_port_strings[0]))
622
/* Per-priority Rx xstats: maps xstat names to the priority flow control
 * (XON/XOFF received) counters in struct i40e_hw_port_stats.
 */
static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_rx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_rx)},
};
629
630 #define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
631                 sizeof(rte_i40e_rxq_prio_strings[0]))
632
/* Per-priority Tx xstats: maps xstat names to the priority flow control
 * (XON/XOFF sent, XON-to-XOFF transitions) counters in
 * struct i40e_hw_port_stats.
 */
static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_tx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_tx)},
	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_2_xoff)},
};
641
642 #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
643                 sizeof(rte_i40e_txq_prio_strings[0]))
644
645 static int
646 eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
647         struct rte_pci_device *pci_dev)
648 {
649         char name[RTE_ETH_NAME_MAX_LEN];
650         struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
651         int i, retval;
652
653         if (pci_dev->device.devargs) {
654                 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
655                                 &eth_da);
656                 if (retval)
657                         return retval;
658         }
659
660         if (eth_da.nb_representor_ports > 0 &&
661             eth_da.type != RTE_ETH_REPRESENTOR_VF) {
662                 PMD_DRV_LOG(ERR, "unsupported representor type: %s\n",
663                             pci_dev->device.devargs->args);
664                 return -ENOTSUP;
665         }
666
667         retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
668                 sizeof(struct i40e_adapter),
669                 eth_dev_pci_specific_init, pci_dev,
670                 eth_i40e_dev_init, NULL);
671
672         if (retval || eth_da.nb_representor_ports < 1)
673                 return retval;
674
675         /* probe VF representor ports */
676         struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
677                 pci_dev->device.name);
678
679         if (pf_ethdev == NULL)
680                 return -ENODEV;
681
682         for (i = 0; i < eth_da.nb_representor_ports; i++) {
683                 struct i40e_vf_representor representor = {
684                         .vf_id = eth_da.representor_ports[i],
685                         .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
686                                 pf_ethdev->data->dev_private)->switch_domain_id,
687                         .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
688                                 pf_ethdev->data->dev_private)
689                 };
690
691                 /* representor port net_bdf_port */
692                 snprintf(name, sizeof(name), "net_%s_representor_%d",
693                         pci_dev->device.name, eth_da.representor_ports[i]);
694
695                 retval = rte_eth_dev_create(&pci_dev->device, name,
696                         sizeof(struct i40e_vf_representor), NULL, NULL,
697                         i40e_vf_representor_init, &representor);
698
699                 if (retval)
700                         PMD_DRV_LOG(ERR, "failed to create i40e vf "
701                                 "representor %s.", name);
702         }
703
704         return 0;
705 }
706
707 static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
708 {
709         struct rte_eth_dev *ethdev;
710
711         ethdev = rte_eth_dev_allocated(pci_dev->device.name);
712         if (!ethdev)
713                 return 0;
714
715         if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
716                 return rte_eth_dev_pci_generic_remove(pci_dev,
717                                         i40e_vf_representor_uninit);
718         else
719                 return rte_eth_dev_pci_generic_remove(pci_dev,
720                                                 eth_i40e_dev_uninit);
721 }
722
/* PCI driver descriptor for the i40e PF PMD. */
static struct rte_pci_driver rte_i40e_pmd = {
	.id_table = pci_id_i40e_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_i40e_pci_probe,
	.remove = eth_i40e_pci_remove,
};
729
730 static inline void
731 i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
732                          uint32_t reg_val)
733 {
734         uint32_t ori_reg_val;
735         struct rte_eth_dev_data *dev_data =
736                 ((struct i40e_adapter *)hw->back)->pf.dev_data;
737         struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
738
739         ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
740         i40e_write_rx_ctl(hw, reg_addr, reg_val);
741         if (ori_reg_val != reg_val)
742                 PMD_DRV_LOG(WARNING,
743                             "i40e device %s changed global register [0x%08x]."
744                             " original: 0x%08x, new: 0x%08x",
745                             dev->device->name, reg_addr, ori_reg_val, reg_val);
746 }
747
/* Register the PMD with the PCI bus, export its device id table and
 * declare the kernel modules it can bind to.
 */
RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
751
752 #ifndef I40E_GLQF_ORT
753 #define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
754 #endif
755 #ifndef I40E_GLQF_PIT
756 #define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
757 #endif
758 #ifndef I40E_GLQF_L3_MAP
759 #define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
760 #endif
761
/* One-time global parser setup enabling QinQ packet-type recognition. */
static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
	 * Initialize registers for parsing packet type of QinQ
	 * This should be removed from code once proper
	 * configuration API is added to avoid configuration conflicts
	 * between ports of the same device.
	 */
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}
773
774 static inline void i40e_config_automask(struct i40e_pf *pf)
775 {
776         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
777         uint32_t val;
778
779         /* INTENA flag is not auto-cleared for interrupt */
780         val = I40E_READ_REG(hw, I40E_GLINT_CTL);
781         val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
782                 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
783
784         /* If support multi-driver, PF will use INT0. */
785         if (!pf->support_multi_driver)
786                 val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
787
788         I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
789 }
790
791 static inline void i40e_clear_automask(struct i40e_pf *pf)
792 {
793         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
794         uint32_t val;
795
796         val = I40E_READ_REG(hw, I40E_GLINT_CTL);
797         val &= ~(I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
798                  I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK);
799
800         if (!pf->support_multi_driver)
801                 val &= ~I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
802
803         I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
804 }
805
806 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
807
808 /*
809  * Add a ethertype filter to drop all flow control frames transmitted
810  * from VSIs.
811 */
812 static void
813 i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
814 {
815         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
816         uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
817                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
818                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
819         int ret;
820
821         ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
822                                 I40E_FLOW_CONTROL_ETHERTYPE, flags,
823                                 pf->main_vsi_seid, 0,
824                                 TRUE, NULL, NULL);
825         if (ret)
826                 PMD_INIT_LOG(ERR,
827                         "Failed to add filter to drop flow control frames from VSIs.");
828 }
829
/*
 * kvargs handler for "floating_veb_list": parse a semicolon-separated
 * list of VF indices and inclusive ranges (e.g. "1;3-5;7") and mark the
 * selected VFs as true in the bool array passed via @opaque
 * (I40E_MAX_VF entries).
 *
 * Returns 0 when at least one VF was selected, -1 on any syntax error
 * (empty value, non-numeric token, or unexpected separator).
 */
static int
floating_veb_list_handler(__rte_unused const char *key,
			  const char *floating_veb_value,
			  void *opaque)
{
	int idx = 0;
	unsigned int count = 0;
	char *end = NULL;
	int min, max;
	bool *vf_floating_veb = opaque;

	while (isblank(*floating_veb_value))
		floating_veb_value++;

	/* Reset floating VEB configuration for VFs */
	for (idx = 0; idx < I40E_MAX_VF; idx++)
		vf_floating_veb[idx] = false;

	/* min == I40E_MAX_VF acts as "no pending range start". */
	min = I40E_MAX_VF;
	do {
		while (isblank(*floating_veb_value))
			floating_veb_value++;
		if (*floating_veb_value == '\0')
			return -1;
		errno = 0;
		idx = strtoul(floating_veb_value, &end, 10);
		if (errno || end == NULL)
			return -1;
		if (idx < 0)
			return -1;
		while (isblank(*end))
			end++;
		if (*end == '-') {
			/* "min-" seen: remember the range start. */
			min = idx;
		} else if ((*end == ';') || (*end == '\0')) {
			/* End of token: single index or range end;
			 * clamp the upper bound to the last valid VF.
			 */
			max = idx;
			if (min == I40E_MAX_VF)
				min = idx;
			if (max >= I40E_MAX_VF)
				max = I40E_MAX_VF - 1;
			for (idx = min; idx <= max; idx++) {
				vf_floating_veb[idx] = true;
				count++;
			}
			min = I40E_MAX_VF;
		} else {
			return -1;
		}
		floating_veb_value = end + 1;
	} while (*end != '\0');

	if (count == 0)
		return -1;

	return 0;
}
886
887 static void
888 config_vf_floating_veb(struct rte_devargs *devargs,
889                        uint16_t floating_veb,
890                        bool *vf_floating_veb)
891 {
892         struct rte_kvargs *kvlist;
893         int i;
894         const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
895
896         if (!floating_veb)
897                 return;
898         /* All the VFs attach to the floating VEB by default
899          * when the floating VEB is enabled.
900          */
901         for (i = 0; i < I40E_MAX_VF; i++)
902                 vf_floating_veb[i] = true;
903
904         if (devargs == NULL)
905                 return;
906
907         kvlist = rte_kvargs_parse(devargs->args, valid_keys);
908         if (kvlist == NULL)
909                 return;
910
911         if (!rte_kvargs_count(kvlist, floating_veb_list)) {
912                 rte_kvargs_free(kvlist);
913                 return;
914         }
915         /* When the floating_veb_list parameter exists, all the VFs
916          * will attach to the legacy VEB firstly, then configure VFs
917          * to the floating VEB according to the floating_veb_list.
918          */
919         if (rte_kvargs_process(kvlist, floating_veb_list,
920                                floating_veb_list_handler,
921                                vf_floating_veb) < 0) {
922                 rte_kvargs_free(kvlist);
923                 return;
924         }
925         rte_kvargs_free(kvlist);
926 }
927
928 static int
929 i40e_check_floating_handler(__rte_unused const char *key,
930                             const char *value,
931                             __rte_unused void *opaque)
932 {
933         if (strcmp(value, "1"))
934                 return -1;
935
936         return 0;
937 }
938
939 static int
940 is_floating_veb_supported(struct rte_devargs *devargs)
941 {
942         struct rte_kvargs *kvlist;
943         const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
944
945         if (devargs == NULL)
946                 return 0;
947
948         kvlist = rte_kvargs_parse(devargs->args, valid_keys);
949         if (kvlist == NULL)
950                 return 0;
951
952         if (!rte_kvargs_count(kvlist, floating_veb_key)) {
953                 rte_kvargs_free(kvlist);
954                 return 0;
955         }
956         /* Floating VEB is enabled when there's key-value:
957          * enable_floating_veb=1
958          */
959         if (rte_kvargs_process(kvlist, floating_veb_key,
960                                i40e_check_floating_handler, NULL) < 0) {
961                 rte_kvargs_free(kvlist);
962                 return 0;
963         }
964         rte_kvargs_free(kvlist);
965
966         return 1;
967 }
968
969 static void
970 config_floating_veb(struct rte_eth_dev *dev)
971 {
972         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
973         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
974         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
975
976         memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
977
978         if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
979                 pf->floating_veb =
980                         is_floating_veb_supported(pci_dev->device.devargs);
981                 config_vf_floating_veb(pci_dev->device.devargs,
982                                        pf->floating_veb,
983                                        pf->floating_veb_list);
984         } else {
985                 pf->floating_veb = false;
986         }
987 }
988
989 #define I40E_L2_TAGS_S_TAG_SHIFT 1
990 #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
991
/*
 * Initialize the PF's ethertype filter rule storage: an empty rule list,
 * a CRC-based lookup hash named "ethertype_<dev name>", and the hash->
 * filter pointer map.
 *
 * Returns 0 on success, -EINVAL if hash creation fails, -ENOMEM if the
 * map allocation fails (the hash is freed on that path).
 */
static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	char ethertype_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters ethertype_hash_params = {
		.name = ethertype_hash_name,
		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
		.key_len = sizeof(struct i40e_ethertype_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize ethertype filter rule list and hash */
	TAILQ_INIT(&ethertype_rule->ethertype_list);
	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
		 "ethertype_%s", dev->device->name);
	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
	if (!ethertype_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
		return -EINVAL;
	}
	/* hash_map translates a hash position into its filter entry. */
	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
				       sizeof(struct i40e_ethertype_filter *) *
				       I40E_MAX_ETHERTYPE_FILTER_NUM,
				       0);
	if (!ethertype_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for ethertype hash map!");
		ret = -ENOMEM;
		goto err_ethertype_hash_map_alloc;
	}

	return 0;

err_ethertype_hash_map_alloc:
	rte_hash_free(ethertype_rule->hash_table);

	return ret;
}
1036
/*
 * Initialize the PF's tunnel filter rule storage: an empty rule list,
 * a CRC-based lookup hash named "tunnel_<dev name>", and the hash->
 * filter pointer map.
 *
 * Returns 0 on success, -EINVAL if hash creation fails, -ENOMEM if the
 * map allocation fails (the hash is freed on that path).
 */
static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	char tunnel_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters tunnel_hash_params = {
		.name = tunnel_hash_name,
		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
		.key_len = sizeof(struct i40e_tunnel_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize tunnel filter rule list and hash */
	TAILQ_INIT(&tunnel_rule->tunnel_list);
	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
		 "tunnel_%s", dev->device->name);
	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
	if (!tunnel_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
		return -EINVAL;
	}
	/* hash_map translates a hash position into its filter entry. */
	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
				    sizeof(struct i40e_tunnel_filter *) *
				    I40E_MAX_TUNNEL_FILTER_NUM,
				    0);
	if (!tunnel_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for tunnel hash map!");
		ret = -ENOMEM;
		goto err_tunnel_hash_map_alloc;
	}

	return 0;

err_tunnel_hash_map_alloc:
	rte_hash_free(tunnel_rule->hash_table);

	return ret;
}
1081
/*
 * Initialize all flow director (FDIR) filter bookkeeping for the PF:
 *  - rule list and CRC-based lookup hash ("fdir_<dev name>") plus the
 *    hash-position -> filter pointer map;
 *  - a preallocated array of filter entries;
 *  - per-pctype flow counters;
 *  - the flow entry pool and its free-slot bitmap, sized from the
 *    firmware-reported guaranteed + best-effort filter capacity.
 *
 * Returns 0 on success, -EINVAL if hash creation fails, -ENOMEM on any
 * allocation failure; every error path unwinds all earlier allocations
 * via the goto chain at the bottom.
 */
static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
	uint32_t best = hw->func_caps.fd_filters_best_effort;
	enum i40e_filter_pctype pctype;
	struct rte_bitmap *bmp = NULL;
	uint32_t bmp_size;
	void *mem = NULL;
	uint32_t i = 0;
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = I40E_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct i40e_fdir_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize flow director filter rule list and hash */
	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}

	/* hash_map translates a hash position into its filter entry. */
	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
					  sizeof(struct i40e_fdir_filter *) *
					  I40E_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}

	fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
			sizeof(struct i40e_fdir_filter) *
			I40E_MAX_FDIR_FILTER_NUM,
			0);

	if (!fdir_info->fdir_filter_array) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir filter array!");
		ret = -ENOMEM;
		goto err_fdir_filter_array_alloc;
	}

	/* No flows programmed yet for any packet classifier type. */
	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
		pf->fdir.flow_count[pctype] = 0;

	/* Total FDIR space = guaranteed + best-effort entries. */
	fdir_info->fdir_space_size = alloc + best;
	fdir_info->fdir_actual_cnt = 0;
	fdir_info->fdir_guarantee_total_space = alloc;
	fdir_info->fdir_guarantee_free_space =
		fdir_info->fdir_guarantee_total_space;

	PMD_DRV_LOG(INFO, "FDIR guarantee space: %u, best_effort space %u.", alloc, best);

	fdir_info->fdir_flow_pool.pool =
			rte_zmalloc("i40e_fdir_entry",
				sizeof(struct i40e_fdir_entry) *
				fdir_info->fdir_space_size,
				0);

	if (!fdir_info->fdir_flow_pool.pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for bitmap flow!");
		ret = -ENOMEM;
		goto err_fdir_bitmap_flow_alloc;
	}

	/* Each pool entry records its own index for fast lookup. */
	for (i = 0; i < fdir_info->fdir_space_size; i++)
		fdir_info->fdir_flow_pool.pool[i].idx = i;

	bmp_size =
		rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
	mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
	if (mem == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir bitmap!");
		ret = -ENOMEM;
		goto err_fdir_mem_alloc;
	}
	bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
	if (bmp == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to initialization fdir bitmap!");
		ret = -ENOMEM;
		goto err_fdir_bmp_alloc;
	}
	/* A set bit marks a free flow pool slot. */
	for (i = 0; i < fdir_info->fdir_space_size; i++)
		rte_bitmap_set(bmp, i);

	fdir_info->fdir_flow_pool.bitmap = bmp;

	return 0;

err_fdir_bmp_alloc:
	rte_free(mem);
err_fdir_mem_alloc:
	rte_free(fdir_info->fdir_flow_pool.pool);
err_fdir_bitmap_flow_alloc:
	rte_free(fdir_info->fdir_filter_array);
err_fdir_filter_array_alloc:
	rte_free(fdir_info->hash_map);
err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}
1204
1205 static void
1206 i40e_init_customized_info(struct i40e_pf *pf)
1207 {
1208         int i;
1209
1210         /* Initialize customized pctype */
1211         for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
1212                 pf->customized_pctype[i].index = i;
1213                 pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
1214                 pf->customized_pctype[i].valid = false;
1215         }
1216
1217         pf->gtp_support = false;
1218         pf->esp_support = false;
1219 }
1220
1221 static void
1222 i40e_init_filter_invalidation(struct i40e_pf *pf)
1223 {
1224         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1225         struct i40e_fdir_info *fdir_info = &pf->fdir;
1226         uint32_t glqf_ctl_reg = 0;
1227
1228         glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
1229         if (!pf->support_multi_driver) {
1230                 fdir_info->fdir_invalprio = 1;
1231                 glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
1232                 PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
1233                 i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
1234         } else {
1235                 if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
1236                         fdir_info->fdir_invalprio = 1;
1237                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
1238                 } else {
1239                         fdir_info->fdir_invalprio = 0;
1240                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
1241                 }
1242         }
1243 }
1244
1245 void
1246 i40e_init_queue_region_conf(struct rte_eth_dev *dev)
1247 {
1248         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1249         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1250         struct i40e_queue_regions *info = &pf->queue_region;
1251         uint16_t i;
1252
1253         for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
1254                 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
1255
1256         memset(info, 0, sizeof(struct i40e_queue_regions));
1257 }
1258
1259 static int
1260 i40e_parse_multi_drv_handler(__rte_unused const char *key,
1261                                const char *value,
1262                                void *opaque)
1263 {
1264         struct i40e_pf *pf;
1265         unsigned long support_multi_driver;
1266         char *end;
1267
1268         pf = (struct i40e_pf *)opaque;
1269
1270         errno = 0;
1271         support_multi_driver = strtoul(value, &end, 10);
1272         if (errno != 0 || end == value || *end != 0) {
1273                 PMD_DRV_LOG(WARNING, "Wrong global configuration");
1274                 return -(EINVAL);
1275         }
1276
1277         if (support_multi_driver == 1 || support_multi_driver == 0)
1278                 pf->support_multi_driver = (bool)support_multi_driver;
1279         else
1280                 PMD_DRV_LOG(WARNING, "%s must be 1 or 0,",
1281                             "enable global configuration by default."
1282                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1283         return 0;
1284 }
1285
1286 static int
1287 i40e_support_multi_driver(struct rte_eth_dev *dev)
1288 {
1289         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1290         struct rte_kvargs *kvlist;
1291         int kvargs_count;
1292
1293         /* Enable global configuration by default */
1294         pf->support_multi_driver = false;
1295
1296         if (!dev->device->devargs)
1297                 return 0;
1298
1299         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1300         if (!kvlist)
1301                 return -EINVAL;
1302
1303         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1304         if (!kvargs_count) {
1305                 rte_kvargs_free(kvlist);
1306                 return 0;
1307         }
1308
1309         if (kvargs_count > 1)
1310                 PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
1311                             "the first invalid or last valid one is used !",
1312                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1313
1314         if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1315                                i40e_parse_multi_drv_handler, pf) < 0) {
1316                 rte_kvargs_free(kvlist);
1317                 return -EINVAL;
1318         }
1319
1320         rte_kvargs_free(kvlist);
1321         return 0;
1322 }
1323
/*
 * Write a global register through the admin queue debug interface,
 * first reading it back so a change to a device-global register can be
 * logged (it affects all ports).
 *
 * Returns the admin queue write status, or -EIO if the read-back fails.
 */
static int
i40e_aq_debug_write_global_register(struct i40e_hw *hw,
				    uint32_t reg_addr, uint64_t reg_val,
				    struct i40e_asq_cmd_details *cmd_details)
{
	uint64_t ori_reg_val;
	struct rte_eth_dev_data *dev_data =
		((struct i40e_adapter *)hw->back)->pf.dev_data;
	struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
	int ret;

	ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "Fail to debug read from 0x%08x",
			    reg_addr);
		return -EIO;
	}

	if (ori_reg_val != reg_val)
		PMD_DRV_LOG(WARNING,
			    "i40e device %s changed global register [0x%08x]."
			    " original: 0x%"PRIx64", after: 0x%"PRIx64,
			    dev->device->name, reg_addr, ori_reg_val, reg_val);

	return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
}
1351
1352 static int
1353 read_vf_msg_config(__rte_unused const char *key,
1354                                const char *value,
1355                                void *opaque)
1356 {
1357         struct i40e_vf_msg_cfg *cfg = opaque;
1358
1359         if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
1360                         &cfg->ignore_second) != 3) {
1361                 memset(cfg, 0, sizeof(*cfg));
1362                 PMD_DRV_LOG(ERR, "format error! example: "
1363                                 "%s=60@120:180", ETH_I40E_VF_MSG_CFG);
1364                 return -EINVAL;
1365         }
1366
1367         /*
1368          * If the message validation function been enabled, the 'period'
1369          * and 'ignore_second' must greater than 0.
1370          */
1371         if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
1372                 memset(cfg, 0, sizeof(*cfg));
1373                 PMD_DRV_LOG(ERR, "%s error! the second and third"
1374                                 " number must be greater than 0!",
1375                                 ETH_I40E_VF_MSG_CFG);
1376                 return -EINVAL;
1377         }
1378
1379         return 0;
1380 }
1381
/*
 * Parse the optional ETH_I40E_VF_MSG_CFG devargs into @msg_cfg.
 *
 * Returns 0 when the key is absent or parsed successfully, -EINVAL when
 * the devargs string is malformed, the key is duplicated, or
 * read_vf_msg_config() rejects the value. @msg_cfg is always left
 * all-zero (feature disabled) unless a valid value was parsed.
 */
static int
i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
		struct i40e_vf_msg_cfg *msg_cfg)
{
	struct rte_kvargs *kvlist;
	int kvargs_count;
	int ret = 0;

	/* Default: validation disabled until a valid devarg is seen. */
	memset(msg_cfg, 0, sizeof(*msg_cfg));

	if (!dev->device->devargs)
		return ret;

	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;

	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
	if (!kvargs_count)
		goto free_end;

	/* The key may appear at most once. */
	if (kvargs_count > 1) {
		PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
				ETH_I40E_VF_MSG_CFG);
		ret = -EINVAL;
		goto free_end;
	}

	/* read_vf_msg_config() fills and validates *msg_cfg. */
	if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
			read_vf_msg_config, msg_cfg) < 0)
		ret = -EINVAL;

free_end:
	rte_kvargs_free(kvlist);
	return ret;
}
1418
1419 #define I40E_ALARM_INTERVAL 50000 /* us */
1420
1421 static int
1422 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1423 {
1424         struct rte_pci_device *pci_dev;
1425         struct rte_intr_handle *intr_handle;
1426         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1427         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1428         struct i40e_vsi *vsi;
1429         int ret;
1430         uint32_t len, val;
1431         uint8_t aq_fail = 0;
1432
1433         PMD_INIT_FUNC_TRACE();
1434
1435         dev->dev_ops = &i40e_eth_dev_ops;
1436         dev->rx_queue_count = i40e_dev_rx_queue_count;
1437         dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
1438         dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
1439         dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
1440         dev->rx_pkt_burst = i40e_recv_pkts;
1441         dev->tx_pkt_burst = i40e_xmit_pkts;
1442         dev->tx_pkt_prepare = i40e_prep_pkts;
1443
1444         /* for secondary processes, we don't initialise any further as primary
1445          * has already done this work. Only check we don't need a different
1446          * RX function */
1447         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1448                 i40e_set_rx_function(dev);
1449                 i40e_set_tx_function(dev);
1450                 return 0;
1451         }
1452         i40e_set_default_ptype_table(dev);
1453         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1454         intr_handle = &pci_dev->intr_handle;
1455
1456         rte_eth_copy_pci_info(dev, pci_dev);
1457         dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1458
1459         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1460         pf->dev_data = dev->data;
1461
1462         hw->back = I40E_PF_TO_ADAPTER(pf);
1463         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1464         if (!hw->hw_addr) {
1465                 PMD_INIT_LOG(ERR,
1466                         "Hardware is not available, as address is NULL");
1467                 return -ENODEV;
1468         }
1469
1470         hw->vendor_id = pci_dev->id.vendor_id;
1471         hw->device_id = pci_dev->id.device_id;
1472         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1473         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1474         hw->bus.device = pci_dev->addr.devid;
1475         hw->bus.func = pci_dev->addr.function;
1476         hw->adapter_stopped = 0;
1477         hw->adapter_closed = 0;
1478
1479         /* Init switch device pointer */
1480         hw->switch_dev = NULL;
1481
1482         /*
1483          * Switch Tag value should not be identical to either the First Tag
1484          * or Second Tag values. So set something other than common Ethertype
1485          * for internal switching.
1486          */
1487         hw->switch_tag = 0xffff;
1488
1489         val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1490         if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1491                 PMD_INIT_LOG(ERR, "\nERROR: "
1492                         "Firmware recovery mode detected. Limiting functionality.\n"
1493                         "Refer to the Intel(R) Ethernet Adapters and Devices "
1494                         "User Guide for details on firmware recovery mode.");
1495                 return -EIO;
1496         }
1497
1498         i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
1499         /* Check if need to support multi-driver */
1500         i40e_support_multi_driver(dev);
1501
1502         /* Make sure all is clean before doing PF reset */
1503         i40e_clear_hw(hw);
1504
1505         /* Reset here to make sure all is clean for each PF */
1506         ret = i40e_pf_reset(hw);
1507         if (ret) {
1508                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1509                 return ret;
1510         }
1511
1512         /* Initialize the shared code (base driver) */
1513         ret = i40e_init_shared_code(hw);
1514         if (ret) {
1515                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1516                 return ret;
1517         }
1518
1519         /* Initialize the parameters for adminq */
1520         i40e_init_adminq_parameter(hw);
1521         ret = i40e_init_adminq(hw);
1522         if (ret != I40E_SUCCESS) {
1523                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1524                 return -EIO;
1525         }
1526         /* Firmware of SFP x722 does not support 802.1ad frames ability */
1527         if (hw->device_id == I40E_DEV_ID_SFP_X722 ||
1528                 hw->device_id == I40E_DEV_ID_SFP_I_X722)
1529                 hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
1530
1531         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1532                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1533                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1534                      ((hw->nvm.version >> 12) & 0xf),
1535                      ((hw->nvm.version >> 4) & 0xff),
1536                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1537
1538         /* Initialize the hardware */
1539         i40e_hw_init(dev);
1540
1541         i40e_config_automask(pf);
1542
1543         i40e_set_default_pctype_table(dev);
1544
1545         /*
1546          * To work around the NVM issue, initialize registers
1547          * for packet type of QinQ by software.
1548          * It should be removed once issues are fixed in NVM.
1549          */
1550         if (!pf->support_multi_driver)
1551                 i40e_GLQF_reg_init(hw);
1552
1553         /* Initialize the input set for filters (hash and fd) to default value */
1554         i40e_filter_input_set_init(pf);
1555
1556         /* initialise the L3_MAP register */
1557         if (!pf->support_multi_driver) {
1558                 ret = i40e_aq_debug_write_global_register(hw,
1559                                                    I40E_GLQF_L3_MAP(40),
1560                                                    0x00000028,  NULL);
1561                 if (ret)
1562                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1563                                      ret);
1564                 PMD_INIT_LOG(DEBUG,
1565                              "Global register 0x%08x is changed with 0x28",
1566                              I40E_GLQF_L3_MAP(40));
1567         }
1568
1569         /* Need the special FW version to support floating VEB */
1570         config_floating_veb(dev);
1571         /* Clear PXE mode */
1572         i40e_clear_pxe_mode(hw);
1573         i40e_dev_sync_phy_type(hw);
1574
1575         /*
1576          * On X710, performance number is far from the expectation on recent
1577          * firmware versions. The fix for this issue may not be integrated in
1578          * the following firmware version. So the workaround in software driver
1579          * is needed. It needs to modify the initial values of 3 internal only
1580          * registers. Note that the workaround can be removed when it is fixed
1581          * in firmware in the future.
1582          */
1583         i40e_configure_registers(hw);
1584
1585         /* Get hw capabilities */
1586         ret = i40e_get_cap(hw);
1587         if (ret != I40E_SUCCESS) {
1588                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1589                 goto err_get_capabilities;
1590         }
1591
1592         /* Initialize parameters for PF */
1593         ret = i40e_pf_parameter_init(dev);
1594         if (ret != 0) {
1595                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1596                 goto err_parameter_init;
1597         }
1598
1599         /* Initialize the queue management */
1600         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1601         if (ret < 0) {
1602                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1603                 goto err_qp_pool_init;
1604         }
1605         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1606                                 hw->func_caps.num_msix_vectors - 1);
1607         if (ret < 0) {
1608                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1609                 goto err_msix_pool_init;
1610         }
1611
1612         /* Initialize lan hmc */
1613         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1614                                 hw->func_caps.num_rx_qp, 0, 0);
1615         if (ret != I40E_SUCCESS) {
1616                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1617                 goto err_init_lan_hmc;
1618         }
1619
1620         /* Configure lan hmc */
1621         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1622         if (ret != I40E_SUCCESS) {
1623                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1624                 goto err_configure_lan_hmc;
1625         }
1626
1627         /* Get and check the mac address */
1628         i40e_get_mac_addr(hw, hw->mac.addr);
1629         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1630                 PMD_INIT_LOG(ERR, "mac address is not valid");
1631                 ret = -EIO;
1632                 goto err_get_mac_addr;
1633         }
1634         /* Copy the permanent MAC address */
1635         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1636                         (struct rte_ether_addr *)hw->mac.perm_addr);
1637
1638         /* Disable flow control */
1639         hw->fc.requested_mode = I40E_FC_NONE;
1640         i40e_set_fc(hw, &aq_fail, TRUE);
1641
1642         /* Set the global registers with default ether type value */
1643         if (!pf->support_multi_driver) {
1644                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1645                                          RTE_ETHER_TYPE_VLAN);
1646                 if (ret != I40E_SUCCESS) {
1647                         PMD_INIT_LOG(ERR,
1648                                      "Failed to set the default outer "
1649                                      "VLAN ether type");
1650                         goto err_setup_pf_switch;
1651                 }
1652         }
1653
1654         /* PF setup, which includes VSI setup */
1655         ret = i40e_pf_setup(pf);
1656         if (ret) {
1657                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1658                 goto err_setup_pf_switch;
1659         }
1660
1661         vsi = pf->main_vsi;
1662
1663         /* Disable double vlan by default */
1664         i40e_vsi_config_double_vlan(vsi, FALSE);
1665
1666         /* Disable S-TAG identification when floating_veb is disabled */
1667         if (!pf->floating_veb) {
1668                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1669                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1670                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1671                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1672                 }
1673         }
1674
1675         if (!vsi->max_macaddrs)
1676                 len = RTE_ETHER_ADDR_LEN;
1677         else
1678                 len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1679
1680         /* Should be after VSI initialized */
1681         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1682         if (!dev->data->mac_addrs) {
1683                 PMD_INIT_LOG(ERR,
1684                         "Failed to allocated memory for storing mac address");
1685                 goto err_mac_alloc;
1686         }
1687         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1688                                         &dev->data->mac_addrs[0]);
1689
1690         /* Init dcb to sw mode by default */
1691         ret = i40e_dcb_init_configure(dev, TRUE);
1692         if (ret != I40E_SUCCESS) {
1693                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1694                 pf->flags &= ~I40E_FLAG_DCB;
1695         }
1696         /* Update HW struct after DCB configuration */
1697         i40e_get_cap(hw);
1698
1699         /* initialize pf host driver to setup SRIOV resource if applicable */
1700         i40e_pf_host_init(dev);
1701
1702         /* register callback func to eal lib */
1703         rte_intr_callback_register(intr_handle,
1704                                    i40e_dev_interrupt_handler, dev);
1705
1706         /* configure and enable device interrupt */
1707         i40e_pf_config_irq0(hw, TRUE);
1708         i40e_pf_enable_irq0(hw);
1709
1710         /* enable uio intr after callback register */
1711         rte_intr_enable(intr_handle);
1712
1713         /* By default disable flexible payload in global configuration */
1714         if (!pf->support_multi_driver)
1715                 i40e_flex_payload_reg_set_default(hw);
1716
1717         /*
1718          * Add an ethertype filter to drop all flow control frames transmitted
1719          * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1720          * frames to wire.
1721          */
1722         i40e_add_tx_flow_control_drop_filter(pf);
1723
1724         /* Set the max frame size to 0x2600 by default,
1725          * in case other drivers changed the default value.
1726          */
1727         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
1728
1729         /* initialize mirror rule list */
1730         TAILQ_INIT(&pf->mirror_list);
1731
1732         /* initialize RSS rule list */
1733         TAILQ_INIT(&pf->rss_config_list);
1734
1735         /* initialize Traffic Manager configuration */
1736         i40e_tm_conf_init(dev);
1737
1738         /* Initialize customized information */
1739         i40e_init_customized_info(pf);
1740
1741         /* Initialize the filter invalidation configuration */
1742         i40e_init_filter_invalidation(pf);
1743
1744         ret = i40e_init_ethtype_filter_list(dev);
1745         if (ret < 0)
1746                 goto err_init_ethtype_filter_list;
1747         ret = i40e_init_tunnel_filter_list(dev);
1748         if (ret < 0)
1749                 goto err_init_tunnel_filter_list;
1750         ret = i40e_init_fdir_filter_list(dev);
1751         if (ret < 0)
1752                 goto err_init_fdir_filter_list;
1753
1754         /* initialize queue region configuration */
1755         i40e_init_queue_region_conf(dev);
1756
1757         /* reset all stats of the device, including pf and main vsi */
1758         i40e_dev_stats_reset(dev);
1759
1760         return 0;
1761
1762 err_init_fdir_filter_list:
1763         rte_hash_free(pf->tunnel.hash_table);
1764         rte_free(pf->tunnel.hash_map);
1765 err_init_tunnel_filter_list:
1766         rte_hash_free(pf->ethertype.hash_table);
1767         rte_free(pf->ethertype.hash_map);
1768 err_init_ethtype_filter_list:
1769         rte_intr_callback_unregister(intr_handle,
1770                 i40e_dev_interrupt_handler, dev);
1771         rte_free(dev->data->mac_addrs);
1772         dev->data->mac_addrs = NULL;
1773 err_mac_alloc:
1774         i40e_vsi_release(pf->main_vsi);
1775 err_setup_pf_switch:
1776 err_get_mac_addr:
1777 err_configure_lan_hmc:
1778         (void)i40e_shutdown_lan_hmc(hw);
1779 err_init_lan_hmc:
1780         i40e_res_pool_destroy(&pf->msix_pool);
1781 err_msix_pool_init:
1782         i40e_res_pool_destroy(&pf->qp_pool);
1783 err_qp_pool_init:
1784 err_parameter_init:
1785 err_get_capabilities:
1786         (void)i40e_shutdown_adminq(hw);
1787
1788         return ret;
1789 }
1790
1791 static void
1792 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1793 {
1794         struct i40e_ethertype_filter *p_ethertype;
1795         struct i40e_ethertype_rule *ethertype_rule;
1796
1797         ethertype_rule = &pf->ethertype;
1798         /* Remove all ethertype filter rules and hash */
1799         if (ethertype_rule->hash_map)
1800                 rte_free(ethertype_rule->hash_map);
1801         if (ethertype_rule->hash_table)
1802                 rte_hash_free(ethertype_rule->hash_table);
1803
1804         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1805                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1806                              p_ethertype, rules);
1807                 rte_free(p_ethertype);
1808         }
1809 }
1810
1811 static void
1812 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1813 {
1814         struct i40e_tunnel_filter *p_tunnel;
1815         struct i40e_tunnel_rule *tunnel_rule;
1816
1817         tunnel_rule = &pf->tunnel;
1818         /* Remove all tunnel director rules and hash */
1819         if (tunnel_rule->hash_map)
1820                 rte_free(tunnel_rule->hash_map);
1821         if (tunnel_rule->hash_table)
1822                 rte_hash_free(tunnel_rule->hash_table);
1823
1824         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1825                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1826                 rte_free(p_tunnel);
1827         }
1828 }
1829
1830 static void
1831 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1832 {
1833         struct i40e_fdir_filter *p_fdir;
1834         struct i40e_fdir_info *fdir_info;
1835
1836         fdir_info = &pf->fdir;
1837
1838         /* Remove all flow director rules */
1839         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
1840                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1841 }
1842
1843 static void
1844 i40e_fdir_memory_cleanup(struct i40e_pf *pf)
1845 {
1846         struct i40e_fdir_info *fdir_info;
1847
1848         fdir_info = &pf->fdir;
1849
1850         /* flow director memory cleanup */
1851         if (fdir_info->hash_map)
1852                 rte_free(fdir_info->hash_map);
1853         if (fdir_info->hash_table)
1854                 rte_hash_free(fdir_info->hash_table);
1855         if (fdir_info->fdir_flow_pool.bitmap)
1856                 rte_free(fdir_info->fdir_flow_pool.bitmap);
1857         if (fdir_info->fdir_flow_pool.pool)
1858                 rte_free(fdir_info->fdir_flow_pool.pool);
1859         if (fdir_info->fdir_filter_array)
1860                 rte_free(fdir_info->fdir_filter_array);
1861 }
1862
/*
 * Restore the default (disabled) flexible payload configuration by
 * zeroing the GLQF_ORT entries 33-35, which cover the corresponding
 * L2/L3/L4 layers.
 */
void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
{
	uint32_t ort_idx;

	for (ort_idx = 33; ort_idx <= 35; ort_idx++)
		I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(ort_idx), 0x00000000);
}
1873
1874 static int
1875 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1876 {
1877         struct i40e_hw *hw;
1878
1879         PMD_INIT_FUNC_TRACE();
1880
1881         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1882                 return 0;
1883
1884         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1885
1886         if (hw->adapter_closed == 0)
1887                 i40e_dev_close(dev);
1888
1889         return 0;
1890 }
1891
/*
 * dev_configure callback: sync PHY ability, (re)configure flow director
 * for the legacy filter API, set up VLAN, VMDq and DCB according to the
 * requested multi-queue mode, and reset the rte_flow list.
 *
 * Returns 0 on success or a negative errno; on failure the fdir and
 * VMDq resources acquired here are released again.
 */
static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
	struct i40e_adapter *ad =
		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	int i, ret;

	ret = i40e_dev_sync_phy_type(hw);
	if (ret)
		return ret;

	/* Initialize to TRUE. If any of Rx queues doesn't meet the
	 * bulk allocation or vector Rx preconditions we will reset it.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->rx_vec_allowed = true;
	ad->tx_simple_allowed = true;
	ad->tx_vec_allowed = true;

	/* RSS mode implies the RSS hash offload. */
	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* Only legacy filter API needs the following fdir config. So when the
	 * legacy filter API is deprecated, the following codes should also be
	 * removed.
	 */
	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
		ret = i40e_fdir_setup(pf);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to setup flow director.");
			return -ENOTSUP;
		}
		ret = i40e_fdir_configure(dev);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "failed to configure fdir.");
			goto err;
		}
	} else
		i40e_fdir_teardown(pf);

	ret = i40e_dev_init_vlan(dev);
	if (ret < 0)
		goto err;

	/* VMDQ setup.
	 *  General PMD driver call sequence are NIC init, configure,
	 *  rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
	 *  will try to lookup the VSI that specific queue belongs to if VMDQ
	 *  applicable. So, VMDQ setting has to be done before
	 *  rx/tx_queue_setup(). This function is good  to place vmdq_setup.
	 *  For RSS setting, it will try to calculate actual configured RX queue
	 *  number, which will be available after rx_queue_setup(). dev_start()
	 *  function is good to place RSS setup.
	 */
	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
		ret = i40e_vmdq_setup(dev);
		if (ret)
			goto err;
	}

	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
		ret = i40e_dcb_setup(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to configure DCB.");
			goto err_dcb;
		}
	}

	/* (Re)start with an empty rte_flow list. */
	TAILQ_INIT(&pf->flow_list);

	return 0;

err_dcb:
	/* need to release vmdq resource if exists */
	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_release(pf->vmdq[i].vsi);
		pf->vmdq[i].vsi = NULL;
	}
	rte_free(pf->vmdq);
	pf->vmdq = NULL;
err:
	/* Need to release fdir resource if exists.
	 * Only legacy filter API needs the following fdir config. So when the
	 * legacy filter API is deprecated, the following code should also be
	 * removed.
	 */
	i40e_fdir_teardown(pf);
	return ret;
}
1984
/*
 * Detach all queues of @vsi from their MSI-X vector: clear every
 * per-queue Tx/Rx interrupt cause-control register, then terminate the
 * interrupt link list that was set up for this VSI (PF misc vector,
 * other PF vectors, or the per-VF link list for SRIOV VSIs).
 */
void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t i;

	/* Disable the interrupt cause for every queue pair of this VSI. */
	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
		rte_wmb();
	}

	if (vsi->type != I40E_VSI_SRIOV) {
		if (!rte_intr_allow_others(intr_handle)) {
			/* Queues were linked to the misc vector (vector 0):
			 * end LNKLST0 and zero its default ITR.
			 */
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
				       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
				       0);
		} else {
			/* LNKLSTN/ITRN registers are indexed from vector 1. */
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
				       I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
						       msix_vect - 1), 0);
		}
	} else {
		uint32_t reg;
		/* Per-VF vector index; num_msix_vectors_vf counts irq0 too,
		 * hence the -1 (see __vsi_queues_bind_intr()).
		 */
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
	}
	I40E_WRITE_FLUSH(hw);
}
2025
/*
 * Bind @nb_queue Rx queues starting at @base_queue to MSI-X vector
 * @msix_vect with ITR index @itr_idx, chaining them into the hardware
 * interrupt link list, then publish the first queue as the link-list
 * head in the register that matches the VSI type and vector.
 */
static void
__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
		       int base_queue, int nb_queue,
		       uint16_t itr_idx)
{
	int i;
	uint32_t val;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);

	/* Bind all RX queues to allocated MSIX interrupt */
	for (i = 0; i < nb_queue; i++) {
		/* Each QINT_RQCTL entry names the vector, the ITR index and
		 * the next queue in the chain (NEXTQ), forming a linked list.
		 */
		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
			((base_queue + i + 1) <<
			 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
			I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		/* Last element terminates the list (NEXTQ all-ones). */
		if (i == nb_queue - 1)
			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
	}

	/* Write first RX queue to Link list register as the head element */
	if (vsi->type != I40E_VSI_SRIOV) {
		uint16_t interval =
			i40e_calc_itr_interval(1, pf->support_multi_driver);

		if (msix_vect == I40E_MISC_VEC_ID) {
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
				       (base_queue <<
					I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
				       interval);
		} else {
			/* LNKLSTN/ITRN registers are indexed from vector 1. */
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
				       (base_queue <<
					I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
						       msix_vect - 1),
				       interval);
		}
	} else {
		uint32_t reg;

		if (msix_vect == I40E_MISC_VEC_ID) {
			I40E_WRITE_REG(hw,
				       I40E_VPINT_LNKLST0(vsi->user_param),
				       (base_queue <<
					I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
		} else {
			/* num_msix_vectors_vf needs to minus irq0 */
			reg = (hw->func_caps.num_msix_vectors_vf - 1) *
				vsi->user_param + (msix_vect - 1);

			I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
				       (base_queue <<
					I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
		}
	}

	I40E_WRITE_FLUSH(hw);
}
2100
/*
 * Distribute the VSI's queues across its allocated MSI-X vectors (via
 * __vsi_queues_bind_intr()) and, for PF/VMDq VSIs with per-queue
 * interrupts enabled, record the chosen vector per queue in
 * intr_handle->intr_vec. When fewer vectors than queues are available,
 * the remaining queues share the last (or the misc) vector.
 *
 * Returns 0 on success, -EINVAL when the VSI owns no MSI-X vector.
 */
int
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
{
	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	/* Can't use more vectors than event fds set up by the EAL. */
	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
	uint16_t queue_idx = 0;
	int record = 0;
	int i;

	/* Start from a clean slate: clear all queue cause-control regs. */
	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
	}

	/* VF bind interrupt */
	if (vsi->type == I40E_VSI_SRIOV) {
		if (vsi->nb_msix == 0) {
			PMD_DRV_LOG(ERR, "No msix resource");
			return -EINVAL;
		}
		/* All VF queues share the single assigned vector. */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue, vsi->nb_qps,
				       itr_idx);
		return 0;
	}

	/* PF & VMDq bind interrupt */
	if (rte_intr_dp_is_en(intr_handle)) {
		/* 'record' marks that intr_vec[] must be filled in;
		 * 'queue_idx' is this VSI's offset into that array.
		 */
		if (vsi->type == I40E_VSI_MAIN) {
			queue_idx = 0;
			record = 1;
		} else if (vsi->type == I40E_VSI_VMDQ2) {
			struct i40e_vsi *main_vsi =
				I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
			queue_idx = vsi->base_queue - main_vsi->nb_qps;
			record = 1;
		}
	}

	for (i = 0; i < vsi->nb_used_qps; i++) {
		if (vsi->nb_msix == 0) {
			PMD_DRV_LOG(ERR, "No msix resource");
			return -EINVAL;
		} else if (nb_msix <= 1) {
			if (!rte_intr_allow_others(intr_handle))
				/* allow to share MISC_VEC_ID */
				msix_vect = I40E_MISC_VEC_ID;

			/* no enough msix_vect, map all to one */
			__vsi_queues_bind_intr(vsi, msix_vect,
					       vsi->base_queue + i,
					       vsi->nb_used_qps - i,
					       itr_idx);
			for (; !!record && i < vsi->nb_used_qps; i++)
				intr_handle->intr_vec[queue_idx + i] =
					msix_vect;
			break;
		}
		/* 1:1 queue/msix_vect mapping */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue + i, 1,
				       itr_idx);
		if (!!record)
			intr_handle->intr_vec[queue_idx + i] = msix_vect;

		msix_vect++;
		nb_msix--;
	}

	return 0;
}
2176
2177 void
2178 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2179 {
2180         struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2181         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2182         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2183         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2184         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2185         uint16_t msix_intr, i;
2186
2187         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2188                 for (i = 0; i < vsi->nb_msix; i++) {
2189                         msix_intr = vsi->msix_intr + i;
2190                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2191                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
2192                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2193                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2194                 }
2195         else
2196                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2197                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
2198                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2199                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2200
2201         I40E_WRITE_FLUSH(hw);
2202 }
2203
2204 void
2205 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2206 {
2207         struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2208         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2209         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2210         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2211         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2212         uint16_t msix_intr, i;
2213
2214         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2215                 for (i = 0; i < vsi->nb_msix; i++) {
2216                         msix_intr = vsi->msix_intr + i;
2217                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2218                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2219                 }
2220         else
2221                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2222                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2223
2224         I40E_WRITE_FLUSH(hw);
2225 }
2226
2227 static inline uint8_t
2228 i40e_parse_link_speeds(uint16_t link_speeds)
2229 {
2230         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2231
2232         if (link_speeds & ETH_LINK_SPEED_40G)
2233                 link_speed |= I40E_LINK_SPEED_40GB;
2234         if (link_speeds & ETH_LINK_SPEED_25G)
2235                 link_speed |= I40E_LINK_SPEED_25GB;
2236         if (link_speeds & ETH_LINK_SPEED_20G)
2237                 link_speed |= I40E_LINK_SPEED_20GB;
2238         if (link_speeds & ETH_LINK_SPEED_10G)
2239                 link_speed |= I40E_LINK_SPEED_10GB;
2240         if (link_speeds & ETH_LINK_SPEED_1G)
2241                 link_speed |= I40E_LINK_SPEED_1GB;
2242         if (link_speeds & ETH_LINK_SPEED_100M)
2243                 link_speed |= I40E_LINK_SPEED_100MB;
2244
2245         return link_speed;
2246 }
2247
2248 static int
2249 i40e_phy_conf_link(struct i40e_hw *hw,
2250                    uint8_t abilities,
2251                    uint8_t force_speed,
2252                    bool is_up)
2253 {
2254         enum i40e_status_code status;
2255         struct i40e_aq_get_phy_abilities_resp phy_ab;
2256         struct i40e_aq_set_phy_config phy_conf;
2257         enum i40e_aq_phy_type cnt;
2258         uint8_t avail_speed;
2259         uint32_t phy_type_mask = 0;
2260
2261         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2262                         I40E_AQ_PHY_FLAG_PAUSE_RX |
2263                         I40E_AQ_PHY_FLAG_PAUSE_RX |
2264                         I40E_AQ_PHY_FLAG_LOW_POWER;
2265         int ret = -ENOTSUP;
2266
2267         /* To get phy capabilities of available speeds. */
2268         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2269                                               NULL);
2270         if (status) {
2271                 PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2272                                 status);
2273                 return ret;
2274         }
2275         avail_speed = phy_ab.link_speed;
2276
2277         /* To get the current phy config. */
2278         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2279                                               NULL);
2280         if (status) {
2281                 PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2282                                 status);
2283                 return ret;
2284         }
2285
2286         /* If link needs to go up and it is in autoneg mode the speed is OK,
2287          * no need to set up again.
2288          */
2289         if (is_up && phy_ab.phy_type != 0 &&
2290                      abilities & I40E_AQ_PHY_AN_ENABLED &&
2291                      phy_ab.link_speed != 0)
2292                 return I40E_SUCCESS;
2293
2294         memset(&phy_conf, 0, sizeof(phy_conf));
2295
2296         /* bits 0-2 use the values from get_phy_abilities_resp */
2297         abilities &= ~mask;
2298         abilities |= phy_ab.abilities & mask;
2299
2300         phy_conf.abilities = abilities;
2301
2302         /* If link needs to go up, but the force speed is not supported,
2303          * Warn users and config the default available speeds.
2304          */
2305         if (is_up && !(force_speed & avail_speed)) {
2306                 PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2307                 phy_conf.link_speed = avail_speed;
2308         } else {
2309                 phy_conf.link_speed = is_up ? force_speed : avail_speed;
2310         }
2311
2312         /* PHY type mask needs to include each type except PHY type extension */
2313         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2314                 phy_type_mask |= 1 << cnt;
2315
2316         /* use get_phy_abilities_resp value for the rest */
2317         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2318         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2319                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2320                 I40E_AQ_PHY_TYPE_EXT_25G_LR | I40E_AQ_PHY_TYPE_EXT_25G_AOC |
2321                 I40E_AQ_PHY_TYPE_EXT_25G_ACC) : 0;
2322         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2323         phy_conf.eee_capability = phy_ab.eee_capability;
2324         phy_conf.eeer = phy_ab.eeer_val;
2325         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2326
2327         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2328                     phy_ab.abilities, phy_ab.link_speed);
2329         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2330                     phy_conf.abilities, phy_conf.link_speed);
2331
2332         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2333         if (status)
2334                 return ret;
2335
2336         return I40E_SUCCESS;
2337 }
2338
2339 static int
2340 i40e_apply_link_speed(struct rte_eth_dev *dev)
2341 {
2342         uint8_t speed;
2343         uint8_t abilities = 0;
2344         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2345         struct rte_eth_conf *conf = &dev->data->dev_conf;
2346
2347         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2348                      I40E_AQ_PHY_LINK_ENABLED;
2349
2350         if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
2351                 conf->link_speeds = ETH_LINK_SPEED_40G |
2352                                     ETH_LINK_SPEED_25G |
2353                                     ETH_LINK_SPEED_20G |
2354                                     ETH_LINK_SPEED_10G |
2355                                     ETH_LINK_SPEED_1G |
2356                                     ETH_LINK_SPEED_100M;
2357
2358                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2359         } else {
2360                 abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2361         }
2362         speed = i40e_parse_link_speeds(conf->link_speeds);
2363
2364         return i40e_phy_conf_link(hw, abilities, speed, true);
2365 }
2366
2367 static int
2368 i40e_dev_start(struct rte_eth_dev *dev)
2369 {
2370         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2371         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2372         struct i40e_vsi *main_vsi = pf->main_vsi;
2373         int ret, i;
2374         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2375         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2376         uint32_t intr_vector = 0;
2377         struct i40e_vsi *vsi;
2378         uint16_t nb_rxq, nb_txq;
2379
2380         hw->adapter_stopped = 0;
2381
2382         rte_intr_disable(intr_handle);
2383
2384         if ((rte_intr_cap_multiple(intr_handle) ||
2385              !RTE_ETH_DEV_SRIOV(dev).active) &&
2386             dev->data->dev_conf.intr_conf.rxq != 0) {
2387                 intr_vector = dev->data->nb_rx_queues;
2388                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2389                 if (ret)
2390                         return ret;
2391         }
2392
2393         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2394                 intr_handle->intr_vec =
2395                         rte_zmalloc("intr_vec",
2396                                     dev->data->nb_rx_queues * sizeof(int),
2397                                     0);
2398                 if (!intr_handle->intr_vec) {
2399                         PMD_INIT_LOG(ERR,
2400                                 "Failed to allocate %d rx_queues intr_vec",
2401                                 dev->data->nb_rx_queues);
2402                         return -ENOMEM;
2403                 }
2404         }
2405
2406         /* Initialize VSI */
2407         ret = i40e_dev_rxtx_init(pf);
2408         if (ret != I40E_SUCCESS) {
2409                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2410                 return ret;
2411         }
2412
2413         /* Map queues with MSIX interrupt */
2414         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2415                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2416         ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2417         if (ret < 0)
2418                 return ret;
2419         i40e_vsi_enable_queues_intr(main_vsi);
2420
2421         /* Map VMDQ VSI queues with MSIX interrupt */
2422         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2423                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2424                 ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2425                                                 I40E_ITR_INDEX_DEFAULT);
2426                 if (ret < 0)
2427                         return ret;
2428                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2429         }
2430
2431         /* Enable all queues which have been configured */
2432         for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
2433                 ret = i40e_dev_rx_queue_start(dev, nb_rxq);
2434                 if (ret)
2435                         goto rx_err;
2436         }
2437
2438         for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
2439                 ret = i40e_dev_tx_queue_start(dev, nb_txq);
2440                 if (ret)
2441                         goto tx_err;
2442         }
2443
2444         /* Enable receiving broadcast packets */
2445         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2446         if (ret != I40E_SUCCESS)
2447                 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2448
2449         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2450                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2451                                                 true, NULL);
2452                 if (ret != I40E_SUCCESS)
2453                         PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2454         }
2455
2456         /* Enable the VLAN promiscuous mode. */
2457         if (pf->vfs) {
2458                 for (i = 0; i < pf->vf_num; i++) {
2459                         vsi = pf->vfs[i].vsi;
2460                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2461                                                      true, NULL);
2462                 }
2463         }
2464
2465         /* Enable mac loopback mode */
2466         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2467             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2468                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2469                 if (ret != I40E_SUCCESS) {
2470                         PMD_DRV_LOG(ERR, "fail to set loopback link");
2471                         goto tx_err;
2472                 }
2473         }
2474
2475         /* Apply link configure */
2476         ret = i40e_apply_link_speed(dev);
2477         if (I40E_SUCCESS != ret) {
2478                 PMD_DRV_LOG(ERR, "Fail to apply link setting");
2479                 goto tx_err;
2480         }
2481
2482         if (!rte_intr_allow_others(intr_handle)) {
2483                 rte_intr_callback_unregister(intr_handle,
2484                                              i40e_dev_interrupt_handler,
2485                                              (void *)dev);
2486                 /* configure and enable device interrupt */
2487                 i40e_pf_config_irq0(hw, FALSE);
2488                 i40e_pf_enable_irq0(hw);
2489
2490                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2491                         PMD_INIT_LOG(INFO,
2492                                 "lsc won't enable because of no intr multiplex");
2493         } else {
2494                 ret = i40e_aq_set_phy_int_mask(hw,
2495                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2496                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2497                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2498                 if (ret != I40E_SUCCESS)
2499                         PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2500
2501                 /* Call get_link_info aq commond to enable/disable LSE */
2502                 i40e_dev_link_update(dev, 0);
2503         }
2504
2505         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2506                 rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2507                                   i40e_dev_alarm_handler, dev);
2508         } else {
2509                 /* enable uio intr after callback register */
2510                 rte_intr_enable(intr_handle);
2511         }
2512
2513         i40e_filter_restore(pf);
2514
2515         if (pf->tm_conf.root && !pf->tm_conf.committed)
2516                 PMD_DRV_LOG(WARNING,
2517                             "please call hierarchy_commit() "
2518                             "before starting the port");
2519
2520         return I40E_SUCCESS;
2521
2522 tx_err:
2523         for (i = 0; i < nb_txq; i++)
2524                 i40e_dev_tx_queue_stop(dev, i);
2525 rx_err:
2526         for (i = 0; i < nb_rxq; i++)
2527                 i40e_dev_rx_queue_stop(dev, i);
2528
2529         return ret;
2530 }
2531
/**
 * Stop the device: stop every Rx/Tx queue, unmap queue interrupts,
 * release queue memory, take the link down and release the datapath
 * interrupt resources. Idempotent: returns immediately when the adapter
 * is already stopped. The teardown order mirrors the setup order of
 * i40e_dev_start() in reverse.
 */
static int
i40e_dev_stop(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int i;

	if (hw->adapter_stopped == 1)
		return 0;

	/* When Rx interrupts are off, start() armed a periodic alarm instead;
	 * cancel it and re-enable the device interrupt for the teardown.
	 */
	if (dev->data->dev_conf.intr_conf.rxq == 0) {
		rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
		rte_intr_enable(intr_handle);
	}

	/* Disable all queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		i40e_dev_tx_queue_stop(dev, i);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		i40e_dev_rx_queue_stop(dev, i);

	/* un-map queues with interrupt registers */
	i40e_vsi_disable_queues_intr(main_vsi);
	i40e_vsi_queues_unbind_intr(main_vsi);

	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
	}

	/* Clear all queues and release memory */
	i40e_dev_clear_queues(dev);

	/* Set link down */
	i40e_dev_set_link_down(dev);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   i40e_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	/* reset hierarchy commit */
	pf->tm_conf.committed = false;

	hw->adapter_stopped = 1;
	dev->data->dev_started = 0;

	pf->adapter->rss_reta_updated = 0;

	return 0;
}
2595
/**
 * Close the device: stop it, then tear down every software and hardware
 * resource owned by the port (mirror rules, queues, HMC, VSIs, admin
 * queue, filters, flows, TM config) and trigger a PF software reset so
 * nothing is left over for the next driver instance. Only the primary
 * process performs the teardown.
 */
static int
i40e_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_mirror_rule *p_mirror;
	struct i40e_filter_control_settings settings;
	struct rte_flow *p_flow;
	uint32_t reg;
	int i;
	int ret;
	uint8_t aq_fail = 0;
	int retries = 0;

	PMD_INIT_FUNC_TRACE();
	/* Secondary processes must not release shared resources */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = rte_eth_switch_domain_free(pf->switch_domain_id);
	if (ret)
		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);


	ret = i40e_dev_stop(dev);

	/* Remove all mirror rules */
	while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
		ret = i40e_aq_del_mirror_rule(hw,
					      pf->main_vsi->veb->seid,
					      p_mirror->rule_type,
					      p_mirror->entries,
					      p_mirror->num_entries,
					      p_mirror->id);
		if (ret < 0)
			PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
				    "status = %d, aq_err = %d.", ret,
				    hw->aq.asq_last_status);

		/* remove mirror software resource anyway */
		TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
		rte_free(p_mirror);
		pf->nb_mirror_rule--;
	}

	i40e_dev_free_queues(dev);

	/* Disable interrupt */
	i40e_pf_disable_irq0(hw);
	rte_intr_disable(intr_handle);

	/*
	 * Only legacy filter API needs the following fdir config. So when the
	 * legacy filter API is deprecated, the following code should also be
	 * removed.
	 */
	i40e_fdir_teardown(pf);

	/* shutdown and destroy the HMC */
	i40e_shutdown_lan_hmc(hw);

	/* Release the VMDq VSIs before the main VSI they depend on */
	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_release(pf->vmdq[i].vsi);
		pf->vmdq[i].vsi = NULL;
	}
	rte_free(pf->vmdq);
	pf->vmdq = NULL;

	/* release all the existing VSIs and VEBs */
	i40e_vsi_release(pf->main_vsi);

	/* shutdown the adminq */
	i40e_aq_queue_shutdown(hw, true);
	i40e_shutdown_adminq(hw);

	i40e_res_pool_destroy(&pf->qp_pool);
	i40e_res_pool_destroy(&pf->msix_pool);

	/* Disable flexible payload in global configuration */
	if (!pf->support_multi_driver)
		i40e_flex_payload_reg_set_default(hw);

	/* force a PF reset to clean anything leftover */
	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	I40E_WRITE_FLUSH(hw);

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Unconfigure filter control */
	memset(&settings, 0, sizeof(settings));
	ret = i40e_set_filter_control(hw, &settings);
	if (ret)
		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
					ret);

	/* Disable flow control */
	hw->fc.requested_mode = I40E_FC_NONE;
	i40e_set_fc(hw, &aq_fail, TRUE);

	/* uninitialize pf host driver */
	i40e_pf_host_uninit(dev);

	/* Unregister the interrupt callback; retry a few times because the
	 * callback may still be executing (-EAGAIN) when close is called.
	 */
	do {
		ret = rte_intr_callback_unregister(intr_handle,
				i40e_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				 "intr callback unregister failed: %d",
				 ret);
		}
		i40e_msec_delay(500);
	} while (retries++ < 5);

	i40e_rm_ethtype_filter_list(pf);
	i40e_rm_tunnel_filter_list(pf);
	i40e_rm_fdir_filter_list(pf);

	/* Remove all flows */
	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
		/* Do not free FDIR flows since they are static allocated */
		if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
			rte_free(p_flow);
	}

	/* release the fdir static allocated memory */
	i40e_fdir_memory_cleanup(pf);

	/* Remove all Traffic Manager configuration */
	i40e_tm_conf_uninit(dev);

	i40e_clear_automask(pf);

	hw->adapter_closed = 1;
	return ret;
}
2738
2739 /*
2740  * Reset PF device only to re-initialize resources in PMD layer
2741  */
2742 static int
2743 i40e_dev_reset(struct rte_eth_dev *dev)
2744 {
2745         int ret;
2746
2747         /* When a DPDK PMD PF begin to reset PF port, it should notify all
2748          * its VF to make them align with it. The detailed notification
2749          * mechanism is PMD specific. As to i40e PF, it is rather complex.
2750          * To avoid unexpected behavior in VF, currently reset of PF with
2751          * SR-IOV activation is not supported. It might be supported later.
2752          */
2753         if (dev->data->sriov.active)
2754                 return -ENOTSUP;
2755
2756         ret = eth_i40e_dev_uninit(dev);
2757         if (ret)
2758                 return ret;
2759
2760         ret = eth_i40e_dev_init(dev, NULL);
2761
2762         return ret;
2763 }
2764
2765 static int
2766 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2767 {
2768         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2769         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2770         struct i40e_vsi *vsi = pf->main_vsi;
2771         int status;
2772
2773         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2774                                                      true, NULL, true);
2775         if (status != I40E_SUCCESS) {
2776                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2777                 return -EAGAIN;
2778         }
2779
2780         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2781                                                         TRUE, NULL);
2782         if (status != I40E_SUCCESS) {
2783                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2784                 /* Rollback unicast promiscuous mode */
2785                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2786                                                     false, NULL, true);
2787                 return -EAGAIN;
2788         }
2789
2790         return 0;
2791 }
2792
2793 static int
2794 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2795 {
2796         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2797         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2798         struct i40e_vsi *vsi = pf->main_vsi;
2799         int status;
2800
2801         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2802                                                      false, NULL, true);
2803         if (status != I40E_SUCCESS) {
2804                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2805                 return -EAGAIN;
2806         }
2807
2808         /* must remain in all_multicast mode */
2809         if (dev->data->all_multicast == 1)
2810                 return 0;
2811
2812         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2813                                                         false, NULL);
2814         if (status != I40E_SUCCESS) {
2815                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2816                 /* Rollback unicast promiscuous mode */
2817                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2818                                                     true, NULL, true);
2819                 return -EAGAIN;
2820         }
2821
2822         return 0;
2823 }
2824
2825 static int
2826 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2827 {
2828         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2829         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2830         struct i40e_vsi *vsi = pf->main_vsi;
2831         int ret;
2832
2833         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2834         if (ret != I40E_SUCCESS) {
2835                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2836                 return -EAGAIN;
2837         }
2838
2839         return 0;
2840 }
2841
2842 static int
2843 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2844 {
2845         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2846         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2847         struct i40e_vsi *vsi = pf->main_vsi;
2848         int ret;
2849
2850         if (dev->data->promiscuous == 1)
2851                 return 0; /* must remain in all_multicast mode */
2852
2853         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2854                                 vsi->seid, FALSE, NULL);
2855         if (ret != I40E_SUCCESS) {
2856                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2857                 return -EAGAIN;
2858         }
2859
2860         return 0;
2861 }
2862
2863 /*
2864  * Set device link up.
2865  */
static int
i40e_dev_set_link_up(struct rte_eth_dev *dev)
{
	/* Bringing the link up is just re-applying the configured speeds */
	return i40e_apply_link_speed(dev);
}
2872
2873 /*
2874  * Set device link down.
2875  */
2876 static int
2877 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2878 {
2879         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2880         uint8_t abilities = 0;
2881         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2882
2883         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2884         return i40e_phy_conf_link(hw, abilities, speed, false);
2885 }
2886
/**
 * Fill in link->link_status and link->link_speed by reading the PRTMAC
 * link-status register directly (no admin-queue round trip). When the
 * link is down, link_speed is left untouched. The encoded speed values
 * are interpreted differently on X722 MACs (see the switch below).
 */
static __rte_always_inline void
update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
{
/* Link status registers and values*/
#define I40E_PRTMAC_LINKSTA             0x001E2420
#define I40E_REG_LINK_UP                0x40000080
#define I40E_PRTMAC_MACC                0x001E24E0
#define I40E_REG_MACC_25GB              0x00020000
#define I40E_REG_SPEED_MASK             0x38000000
#define I40E_REG_SPEED_0                0x00000000
#define I40E_REG_SPEED_1                0x08000000
#define I40E_REG_SPEED_2                0x10000000
#define I40E_REG_SPEED_3                0x18000000
#define I40E_REG_SPEED_4                0x20000000
	uint32_t link_speed;
	uint32_t reg_val;

	reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
	link_speed = reg_val & I40E_REG_SPEED_MASK;
	reg_val &= I40E_REG_LINK_UP;
	/* Link is up only when both bits of I40E_REG_LINK_UP are set */
	link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;

	if (unlikely(link->link_status == 0))
		return;

	/* Parse the link status */
	switch (link_speed) {
	case I40E_REG_SPEED_0:
		link->link_speed = ETH_SPEED_NUM_100M;
		break;
	case I40E_REG_SPEED_1:
		link->link_speed = ETH_SPEED_NUM_1G;
		break;
	case I40E_REG_SPEED_2:
		/* X722 encodes 2.5G here; other MACs use it for 10G */
		if (hw->mac.type == I40E_MAC_X722)
			link->link_speed = ETH_SPEED_NUM_2_5G;
		else
			link->link_speed = ETH_SPEED_NUM_10G;
		break;
	case I40E_REG_SPEED_3:
		if (hw->mac.type == I40E_MAC_X722) {
			link->link_speed = ETH_SPEED_NUM_5G;
		} else {
			/* 25G and 40G share this encoding; the MACC register
			 * disambiguates them.
			 */
			reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);

			if (reg_val & I40E_REG_MACC_25GB)
				link->link_speed = ETH_SPEED_NUM_25G;
			else
				link->link_speed = ETH_SPEED_NUM_40G;
		}
		break;
	case I40E_REG_SPEED_4:
		if (hw->mac.type == I40E_MAC_X722)
			link->link_speed = ETH_SPEED_NUM_10G;
		else
			link->link_speed = ETH_SPEED_NUM_20G;
		break;
	default:
		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
		break;
	}
}
2949
2950 static __rte_always_inline void
2951 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2952         bool enable_lse, int wait_to_complete)
2953 {
2954 #define CHECK_INTERVAL             100  /* 100ms */
2955 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2956         uint32_t rep_cnt = MAX_REPEAT_TIME;
2957         struct i40e_link_status link_status;
2958         int status;
2959
2960         memset(&link_status, 0, sizeof(link_status));
2961
2962         do {
2963                 memset(&link_status, 0, sizeof(link_status));
2964
2965                 /* Get link status information from hardware */
2966                 status = i40e_aq_get_link_info(hw, enable_lse,
2967                                                 &link_status, NULL);
2968                 if (unlikely(status != I40E_SUCCESS)) {
2969                         link->link_speed = ETH_SPEED_NUM_NONE;
2970                         link->link_duplex = ETH_LINK_FULL_DUPLEX;
2971                         PMD_DRV_LOG(ERR, "Failed to get link info");
2972                         return;
2973                 }
2974
2975                 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2976                 if (!wait_to_complete || link->link_status)
2977                         break;
2978
2979                 rte_delay_ms(CHECK_INTERVAL);
2980         } while (--rep_cnt);
2981
2982         /* Parse the link status */
2983         switch (link_status.link_speed) {
2984         case I40E_LINK_SPEED_100MB:
2985                 link->link_speed = ETH_SPEED_NUM_100M;
2986                 break;
2987         case I40E_LINK_SPEED_1GB:
2988                 link->link_speed = ETH_SPEED_NUM_1G;
2989                 break;
2990         case I40E_LINK_SPEED_10GB:
2991                 link->link_speed = ETH_SPEED_NUM_10G;
2992                 break;
2993         case I40E_LINK_SPEED_20GB:
2994                 link->link_speed = ETH_SPEED_NUM_20G;
2995                 break;
2996         case I40E_LINK_SPEED_25GB:
2997                 link->link_speed = ETH_SPEED_NUM_25G;
2998                 break;
2999         case I40E_LINK_SPEED_40GB:
3000                 link->link_speed = ETH_SPEED_NUM_40G;
3001                 break;
3002         default:
3003                 if (link->link_status)
3004                         link->link_speed = ETH_SPEED_NUM_UNKNOWN;
3005                 else
3006                         link->link_speed = ETH_SPEED_NUM_NONE;
3007                 break;
3008         }
3009 }
3010
3011 int
3012 i40e_dev_link_update(struct rte_eth_dev *dev,
3013                      int wait_to_complete)
3014 {
3015         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3016         struct rte_eth_link link;
3017         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3018         int ret;
3019
3020         memset(&link, 0, sizeof(link));
3021
3022         /* i40e uses full duplex only */
3023         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3024         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3025                         ETH_LINK_SPEED_FIXED);
3026
3027         if (!wait_to_complete && !enable_lse)
3028                 update_link_reg(hw, &link);
3029         else
3030                 update_link_aq(hw, &link, enable_lse, wait_to_complete);
3031
3032         if (hw->switch_dev)
3033                 rte_eth_linkstatus_get(hw->switch_dev, &link);
3034
3035         ret = rte_eth_linkstatus_set(dev, &link);
3036         i40e_notify_all_vfs_link_status(dev);
3037
3038         return ret;
3039 }
3040
3041 static void
3042 i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
3043                           uint32_t loreg, bool offset_loaded, uint64_t *offset,
3044                           uint64_t *stat, uint64_t *prev_stat)
3045 {
3046         i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
3047         /* enlarge the limitation when statistics counters overflowed */
3048         if (offset_loaded) {
3049                 if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
3050                         *stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
3051                 *stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
3052         }
3053         *prev_stat = *stat;
3054 }
3055
/* Get all the statistics of a VSI
 *
 * Reads the per-VSI statistics registers selected by the VSI's
 * stat_counter_idx into vsi->eth_stats.  When vsi->offset_loaded is set,
 * the values saved in vsi->eth_stats_offset serve as the baseline, so the
 * counters report deltas relative to when the offsets were captured.
 * The resulting counters are logged at DEBUG level.
 */
void
i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
	struct i40e_eth_stats *nes = &vsi->eth_stats;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);

	/* RX byte counter is 48 bits wide in hardware; the _in_64 helper
	 * widens it to 64 bits using the previously captured value.
	 */
	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
				  vsi->offset_loaded, &oes->rx_bytes,
				  &nes->rx_bytes, &vsi->prev_rx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
			    vsi->offset_loaded, &oes->rx_unicast,
			    &nes->rx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
			    vsi->offset_loaded, &oes->rx_multicast,
			    &nes->rx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
			    vsi->offset_loaded, &oes->rx_broadcast,
			    &nes->rx_broadcast);
	/* exclude CRC bytes */
	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
		nes->rx_broadcast) * RTE_ETHER_CRC_LEN;

	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
			    &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
			    &oes->rx_unknown_protocol,
			    &nes->rx_unknown_protocol);
	/* TX byte counter also needs the 48-to-64-bit widening */
	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
				  vsi->offset_loaded, &oes->tx_bytes,
				  &nes->tx_bytes, &vsi->prev_tx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
			    vsi->offset_loaded, &oes->tx_unicast,
			    &nes->tx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
			    vsi->offset_loaded, &oes->tx_multicast,
			    &nes->tx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
			    vsi->offset_loaded,  &oes->tx_broadcast,
			    &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
			    &oes->tx_errors, &nes->tx_errors);
	/* Subsequent reads are deltas against the offsets captured above */
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
		    vsi->vsi_id);
}
3123
/* Read all port-level and internal (switch) statistics registers.
 *
 * Refreshes pf->stats (new) against pf->stats_offset (baseline) from the
 * GLPRT_* port registers, and pf->internal_stats from the GLV_* registers
 * of the port's own VSI (hw->port).  Byte counters are adjusted to exclude
 * CRC bytes and internally-switched traffic.  Finally refreshes the main
 * VSI statistics as well.
 */
static void
i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
{
	unsigned int i;
	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get rx/tx bytes of internal transfer packets */
	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
				  I40E_GLV_GORCL(hw->port),
				  pf->offset_loaded,
				  &pf->internal_stats_offset.rx_bytes,
				  &pf->internal_stats.rx_bytes,
				  &pf->internal_prev_rx_bytes);
	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
				  I40E_GLV_GOTCL(hw->port),
				  pf->offset_loaded,
				  &pf->internal_stats_offset.tx_bytes,
				  &pf->internal_stats.tx_bytes,
				  &pf->internal_prev_tx_bytes);
	/* Get total internal rx packet count */
	i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
			    I40E_GLV_UPRCL(hw->port),
			    pf->offset_loaded,
			    &pf->internal_stats_offset.rx_unicast,
			    &pf->internal_stats.rx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
			    I40E_GLV_MPRCL(hw->port),
			    pf->offset_loaded,
			    &pf->internal_stats_offset.rx_multicast,
			    &pf->internal_stats.rx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
			    I40E_GLV_BPRCL(hw->port),
			    pf->offset_loaded,
			    &pf->internal_stats_offset.rx_broadcast,
			    &pf->internal_stats.rx_broadcast);
	/* Get total internal tx packet count */
	i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
			    I40E_GLV_UPTCL(hw->port),
			    pf->offset_loaded,
			    &pf->internal_stats_offset.tx_unicast,
			    &pf->internal_stats.tx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
			    I40E_GLV_MPTCL(hw->port),
			    pf->offset_loaded,
			    &pf->internal_stats_offset.tx_multicast,
			    &pf->internal_stats.tx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
			    I40E_GLV_BPTCL(hw->port),
			    pf->offset_loaded,
			    &pf->internal_stats_offset.tx_broadcast,
			    &pf->internal_stats.tx_broadcast);

	/* exclude CRC size */
	pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
		pf->internal_stats.rx_multicast +
		pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;

	/* Get statistics of struct i40e_eth_stats */
	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
				  I40E_GLPRT_GORCL(hw->port),
				  pf->offset_loaded, &os->eth.rx_bytes,
				  &ns->eth.rx_bytes, &pf->prev_rx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
			    I40E_GLPRT_UPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_unicast,
			    &ns->eth.rx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
			    I40E_GLPRT_MPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_multicast,
			    &ns->eth.rx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
			    I40E_GLPRT_BPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_broadcast,
			    &ns->eth.rx_broadcast);
	/* Workaround: CRC size should not be included in byte statistics,
	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
	 * packet.
	 */
	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
		ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;

	/* exclude internal rx bytes
	 * Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
	 * I40E_GLPRT_GORCH[H/L], so there is a small window that cause negative
	 * value.
	 * same to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L], I40E_GLV_BPRC[H/L].
	 */
	if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
		ns->eth.rx_bytes = 0;
	else
		ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;

	if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
		ns->eth.rx_unicast = 0;
	else
		ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;

	if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
		ns->eth.rx_multicast = 0;
	else
		ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;

	if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
		ns->eth.rx_broadcast = 0;
	else
		ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;

	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
			    pf->offset_loaded, &os->eth.rx_discards,
			    &ns->eth.rx_discards);
	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
			    pf->offset_loaded,
			    &os->eth.rx_unknown_protocol,
			    &ns->eth.rx_unknown_protocol);
	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
				  I40E_GLPRT_GOTCL(hw->port),
				  pf->offset_loaded, &os->eth.tx_bytes,
				  &ns->eth.tx_bytes, &pf->prev_tx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
			    I40E_GLPRT_UPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_unicast,
			    &ns->eth.tx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
			    I40E_GLPRT_MPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_multicast,
			    &ns->eth.tx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
			    I40E_GLPRT_BPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_broadcast,
			    &ns->eth.tx_broadcast);
	/* exclude CRC bytes from the tx byte counter as well */
	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
		ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;

	/* exclude internal tx bytes
	 * Workaround: it is possible I40E_GLV_GOTCH[H/L] is updated before
	 * I40E_GLPRT_GOTCH[H/L], so there is a small window that cause negative
	 * value.
	 * same to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L], I40E_GLV_BPTC[H/L].
	 */
	if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
		ns->eth.tx_bytes = 0;
	else
		ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;

	if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
		ns->eth.tx_unicast = 0;
	else
		ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;

	if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
		ns->eth.tx_multicast = 0;
	else
		ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;

	if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
		ns->eth.tx_broadcast = 0;
	else
		ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;

	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
			    pf->offset_loaded, &os->tx_dropped_link_down,
			    &ns->tx_dropped_link_down);
	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
			    pf->offset_loaded, &os->crc_errors,
			    &ns->crc_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
			    pf->offset_loaded, &os->illegal_bytes,
			    &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
			    pf->offset_loaded, &os->mac_local_faults,
			    &ns->mac_local_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
			    pf->offset_loaded, &os->mac_remote_faults,
			    &ns->mac_remote_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
			    pf->offset_loaded, &os->rx_length_errors,
			    &ns->rx_length_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
			    pf->offset_loaded, &os->link_xon_rx,
			    &ns->link_xon_rx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_rx,
			    &ns->link_xoff_rx);
	/* per-priority (8 traffic classes) RX flow-control statistics */
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_rx[i],
				    &ns->priority_xon_rx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_rx[i],
				    &ns->priority_xoff_rx[i]);
	}
	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
			    pf->offset_loaded, &os->link_xon_tx,
			    &ns->link_xon_tx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_tx,
			    &ns->link_xoff_tx);
	/* per-priority (8 traffic classes) TX flow-control statistics */
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_tx[i],
				    &ns->priority_xon_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_tx[i],
				    &ns->priority_xoff_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_2_xoff[i],
				    &ns->priority_xon_2_xoff[i]);
	}
	/* RX packet-size histogram counters */
	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
			    I40E_GLPRT_PRC64L(hw->port),
			    pf->offset_loaded, &os->rx_size_64,
			    &ns->rx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
			    I40E_GLPRT_PRC127L(hw->port),
			    pf->offset_loaded, &os->rx_size_127,
			    &ns->rx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
			    I40E_GLPRT_PRC255L(hw->port),
			    pf->offset_loaded, &os->rx_size_255,
			    &ns->rx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
			    I40E_GLPRT_PRC511L(hw->port),
			    pf->offset_loaded, &os->rx_size_511,
			    &ns->rx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
			    I40E_GLPRT_PRC1023L(hw->port),
			    pf->offset_loaded, &os->rx_size_1023,
			    &ns->rx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
			    I40E_GLPRT_PRC1522L(hw->port),
			    pf->offset_loaded, &os->rx_size_1522,
			    &ns->rx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
			    I40E_GLPRT_PRC9522L(hw->port),
			    pf->offset_loaded, &os->rx_size_big,
			    &ns->rx_size_big);
	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
			    pf->offset_loaded, &os->rx_undersize,
			    &ns->rx_undersize);
	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
			    pf->offset_loaded, &os->rx_fragments,
			    &ns->rx_fragments);
	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
			    pf->offset_loaded, &os->rx_oversize,
			    &ns->rx_oversize);
	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
			    pf->offset_loaded, &os->rx_jabber,
			    &ns->rx_jabber);
	/* TX packet-size histogram counters */
	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
			    I40E_GLPRT_PTC64L(hw->port),
			    pf->offset_loaded, &os->tx_size_64,
			    &ns->tx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
			    I40E_GLPRT_PTC127L(hw->port),
			    pf->offset_loaded, &os->tx_size_127,
			    &ns->tx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
			    I40E_GLPRT_PTC255L(hw->port),
			    pf->offset_loaded, &os->tx_size_255,
			    &ns->tx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
			    I40E_GLPRT_PTC511L(hw->port),
			    pf->offset_loaded, &os->tx_size_511,
			    &ns->tx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
			    I40E_GLPRT_PTC1023L(hw->port),
			    pf->offset_loaded, &os->tx_size_1023,
			    &ns->tx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
			    I40E_GLPRT_PTC1522L(hw->port),
			    pf->offset_loaded, &os->tx_size_1522,
			    &ns->tx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
			    I40E_GLPRT_PTC9522L(hw->port),
			    pf->offset_loaded, &os->tx_size_big,
			    &ns->tx_size_big);
	/* flow-director sideband match counter */
	i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
			   pf->offset_loaded,
			   &os->fd_sb_match, &ns->fd_sb_match);
	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	/* From now on the offsets are valid; reads report deltas */
	pf->offset_loaded = true;

	if (pf->main_vsi)
		i40e_update_vsi_stats(pf->main_vsi);
}
3423
3424 /* Get all statistics of a port */
3425 static int
3426 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3427 {
3428         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3429         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3430         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3431         struct i40e_vsi *vsi;
3432         unsigned i;
3433
3434         /* call read registers - updates values, now write them to struct */
3435         i40e_read_stats_registers(pf, hw);
3436
3437         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3438                         pf->main_vsi->eth_stats.rx_multicast +
3439                         pf->main_vsi->eth_stats.rx_broadcast -
3440                         pf->main_vsi->eth_stats.rx_discards;
3441         stats->opackets = ns->eth.tx_unicast +
3442                         ns->eth.tx_multicast +
3443                         ns->eth.tx_broadcast;
3444         stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3445         stats->obytes   = ns->eth.tx_bytes;
3446         stats->oerrors  = ns->eth.tx_errors +
3447                         pf->main_vsi->eth_stats.tx_errors;
3448
3449         /* Rx Errors */
3450         stats->imissed  = ns->eth.rx_discards +
3451                         pf->main_vsi->eth_stats.rx_discards;
3452         stats->ierrors  = ns->crc_errors +
3453                         ns->rx_length_errors + ns->rx_undersize +
3454                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3455
3456         if (pf->vfs) {
3457                 for (i = 0; i < pf->vf_num; i++) {
3458                         vsi = pf->vfs[i].vsi;
3459                         i40e_update_vsi_stats(vsi);
3460
3461                         stats->ipackets += (vsi->eth_stats.rx_unicast +
3462                                         vsi->eth_stats.rx_multicast +
3463                                         vsi->eth_stats.rx_broadcast -
3464                                         vsi->eth_stats.rx_discards);
3465                         stats->ibytes   += vsi->eth_stats.rx_bytes;
3466                         stats->oerrors  += vsi->eth_stats.tx_errors;
3467                         stats->imissed  += vsi->eth_stats.rx_discards;
3468                 }
3469         }
3470
3471         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3472         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3473         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3474         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3475         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3476         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3477         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3478                     ns->eth.rx_unknown_protocol);
3479         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3480         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3481         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3482         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3483         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3484         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3485
3486         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3487                     ns->tx_dropped_link_down);
3488         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3489         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3490                     ns->illegal_bytes);
3491         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3492         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3493                     ns->mac_local_faults);
3494         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3495                     ns->mac_remote_faults);
3496         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3497                     ns->rx_length_errors);
3498         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3499         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3500         for (i = 0; i < 8; i++) {
3501                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3502                                 i, ns->priority_xon_rx[i]);
3503                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3504                                 i, ns->priority_xoff_rx[i]);
3505         }
3506         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3507         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3508         for (i = 0; i < 8; i++) {
3509                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3510                                 i, ns->priority_xon_tx[i]);
3511                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3512                                 i, ns->priority_xoff_tx[i]);
3513                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3514                                 i, ns->priority_xon_2_xoff[i]);
3515         }
3516         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3517         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3518         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3519         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3520         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3521         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3522         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3523         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3524         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3525         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3526         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3527         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3528         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3529         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3530         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3531         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3532         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3533         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3534         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3535                         ns->mac_short_packet_dropped);
3536         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3537                     ns->checksum_error);
3538         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3539         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3540         return 0;
3541 }
3542
3543 /* Reset the statistics */
3544 static int
3545 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3546 {
3547         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3548         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3549
3550         /* Mark PF and VSI stats to update the offset, aka "reset" */
3551         pf->offset_loaded = false;
3552         if (pf->main_vsi)
3553                 pf->main_vsi->offset_loaded = false;
3554
3555         /* read the stats, reading current register values into offset */
3556         i40e_read_stats_registers(pf, hw);
3557
3558         return 0;
3559 }
3560
3561 static uint32_t
3562 i40e_xstats_calc_num(void)
3563 {
3564         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3565                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3566                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3567 }
3568
3569 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3570                                      struct rte_eth_xstat_name *xstats_names,
3571                                      __rte_unused unsigned limit)
3572 {
3573         unsigned count = 0;
3574         unsigned i, prio;
3575
3576         if (xstats_names == NULL)
3577                 return i40e_xstats_calc_num();
3578
3579         /* Note: limit checked in rte_eth_xstats_names() */
3580
3581         /* Get stats from i40e_eth_stats struct */
3582         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3583                 strlcpy(xstats_names[count].name,
3584                         rte_i40e_stats_strings[i].name,
3585                         sizeof(xstats_names[count].name));
3586                 count++;
3587         }
3588
3589         /* Get individiual stats from i40e_hw_port struct */
3590         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3591                 strlcpy(xstats_names[count].name,
3592                         rte_i40e_hw_port_strings[i].name,
3593                         sizeof(xstats_names[count].name));
3594                 count++;
3595         }
3596
3597         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3598                 for (prio = 0; prio < 8; prio++) {
3599                         snprintf(xstats_names[count].name,
3600                                  sizeof(xstats_names[count].name),
3601                                  "rx_priority%u_%s", prio,
3602                                  rte_i40e_rxq_prio_strings[i].name);
3603                         count++;
3604                 }
3605         }
3606
3607         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3608                 for (prio = 0; prio < 8; prio++) {
3609                         snprintf(xstats_names[count].name,
3610                                  sizeof(xstats_names[count].name),
3611                                  "tx_priority%u_%s", prio,
3612                                  rte_i40e_txq_prio_strings[i].name);
3613                         count++;
3614                 }
3615         }
3616         return count;
3617 }
3618
3619 static int
3620 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3621                     unsigned n)
3622 {
3623         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3624         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3625         unsigned i, count, prio;
3626         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3627
3628         count = i40e_xstats_calc_num();
3629         if (n < count)
3630                 return count;
3631
3632         i40e_read_stats_registers(pf, hw);
3633
3634         if (xstats == NULL)
3635                 return 0;
3636
3637         count = 0;
3638
3639         /* Get stats from i40e_eth_stats struct */
3640         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3641                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3642                         rte_i40e_stats_strings[i].offset);
3643                 xstats[count].id = count;
3644                 count++;
3645         }
3646
3647         /* Get individiual stats from i40e_hw_port struct */
3648         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3649                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3650                         rte_i40e_hw_port_strings[i].offset);
3651                 xstats[count].id = count;
3652                 count++;
3653         }
3654
3655         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3656                 for (prio = 0; prio < 8; prio++) {
3657                         xstats[count].value =
3658                                 *(uint64_t *)(((char *)hw_stats) +
3659                                 rte_i40e_rxq_prio_strings[i].offset +
3660                                 (sizeof(uint64_t) * prio));
3661                         xstats[count].id = count;
3662                         count++;
3663                 }
3664         }
3665
3666         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3667                 for (prio = 0; prio < 8; prio++) {
3668                         xstats[count].value =
3669                                 *(uint64_t *)(((char *)hw_stats) +
3670                                 rte_i40e_txq_prio_strings[i].offset +
3671                                 (sizeof(uint64_t) * prio));
3672                         xstats[count].id = count;
3673                         count++;
3674                 }
3675         }
3676
3677         return count;
3678 }
3679
3680 static int
3681 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3682 {
3683         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3684         u32 full_ver;
3685         u8 ver, patch;
3686         u16 build;
3687         int ret;
3688
3689         full_ver = hw->nvm.oem_ver;
3690         ver = (u8)(full_ver >> 24);
3691         build = (u16)((full_ver >> 8) & 0xffff);
3692         patch = (u8)(full_ver & 0xff);
3693
3694         ret = snprintf(fw_version, fw_size,
3695                  "%d.%d%d 0x%08x %d.%d.%d",
3696                  ((hw->nvm.version >> 12) & 0xf),
3697                  ((hw->nvm.version >> 4) & 0xff),
3698                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3699                  ver, build, patch);
3700         if (ret < 0)
3701                 return -EINVAL;
3702
3703         ret += 1; /* add the size of '\0' */
3704         if (fw_size < (size_t)ret)
3705                 return ret;
3706         else
3707                 return 0;
3708 }
3709
3710 /*
3711  * When using NVM 6.01(for X710 XL710 XXV710)/3.33(for X722) or later,
3712  * the Rx data path does not hang if the FW LLDP is stopped.
3713  * return true if lldp need to stop
3714  * return false if we cannot disable the LLDP to avoid Rx data path blocking.
3715  */
3716 static bool
3717 i40e_need_stop_lldp(struct rte_eth_dev *dev)
3718 {
3719         double nvm_ver;
3720         char ver_str[64] = {0};
3721         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3722
3723         i40e_fw_version_get(dev, ver_str, 64);
3724         nvm_ver = atof(ver_str);
3725         if ((hw->mac.type == I40E_MAC_X722 ||
3726              hw->mac.type == I40E_MAC_X722_VF) &&
3727              ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3728                 return true;
3729         else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3730                 return true;
3731
3732         return false;
3733 }
3734
/* Report device information for the ethdev info query: queue and MAC
 * limits, Rx/Tx offload capabilities, default ring/burst configuration
 * and descriptor limits. Always returns 0.
 */
static int
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	/* Base queue/MAC limits come from the main VSI; VMDq queues are
	 * added on top further below when the VMDq flag is set.
	 */
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	/* No per-queue Rx offloads: all Rx offloads are port-wide */
	dev_info->rx_queue_offload_capa = 0;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_RSS_HASH;

	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO |
		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS |
		dev_info->tx_queue_offload_capa;
	/* Queues can be set up/released while the port is running */
	dev_info->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
						sizeof(uint32_t);
	dev_info->reta_size = pf->hash_lut_size;
	dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;

	/* Default per-queue thresholds, used when the app passes NULL conf */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = I40E_DEFAULT_RX_PTHRESH,
			.hthresh = I40E_DEFAULT_RX_HTHRESH,
			.wthresh = I40E_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = I40E_DEFAULT_TX_PTHRESH,
			.hthresh = I40E_DEFAULT_TX_HTHRESH,
			.wthresh = I40E_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
		.nb_seg_max = I40E_TX_MAX_SEG,
		.nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
	};

	/* VMDq pools add their queues on top of the main VSI's */
	if (pf->flags & I40E_FLAG_VMDQ) {
		dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
		dev_info->vmdq_queue_base = dev_info->max_rx_queues;
		dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
						pf->max_nb_vmdq_vsi;
		dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
		dev_info->max_rx_queues += dev_info->vmdq_queue_num;
		dev_info->max_tx_queues += dev_info->vmdq_queue_num;
	}

	/* Speed capability and preferred ring sizes depend on the PHY:
	 * 40G (XL710), 25G (XXV710) or 1G/10G (X710).
	 */
	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
		/* For XL710 */
		dev_info->speed_capa = ETH_LINK_SPEED_40G;
		dev_info->default_rxportconf.nb_queues = 2;
		dev_info->default_txportconf.nb_queues = 2;
		/* Single-queue setups get larger rings to absorb bursts */
		if (dev->data->nb_rx_queues == 1)
			dev_info->default_rxportconf.ring_size = 2048;
		else
			dev_info->default_rxportconf.ring_size = 1024;
		if (dev->data->nb_tx_queues == 1)
			dev_info->default_txportconf.ring_size = 1024;
		else
			dev_info->default_txportconf.ring_size = 512;

	} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
		/* For XXV710 */
		dev_info->speed_capa = ETH_LINK_SPEED_25G;
		dev_info->default_rxportconf.nb_queues = 1;
		dev_info->default_txportconf.nb_queues = 1;
		dev_info->default_rxportconf.ring_size = 256;
		dev_info->default_txportconf.ring_size = 256;
	} else {
		/* For X710 */
		dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
		dev_info->default_rxportconf.nb_queues = 1;
		dev_info->default_txportconf.nb_queues = 1;
		if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
			dev_info->default_rxportconf.ring_size = 512;
			dev_info->default_txportconf.ring_size = 256;
		} else {
			dev_info->default_rxportconf.ring_size = 256;
			dev_info->default_txportconf.ring_size = 256;
		}
	}
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;

	return 0;
}
3876
3877 static int
3878 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3879 {
3880         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3881         struct i40e_vsi *vsi = pf->main_vsi;
3882         PMD_INIT_FUNC_TRACE();
3883
3884         if (on)
3885                 return i40e_vsi_add_vlan(vsi, vlan_id);
3886         else
3887                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3888 }
3889
3890 static int
3891 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3892                                 enum rte_vlan_type vlan_type,
3893                                 uint16_t tpid, int qinq)
3894 {
3895         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3896         uint64_t reg_r = 0;
3897         uint64_t reg_w = 0;
3898         uint16_t reg_id = 3;
3899         int ret;
3900
3901         if (qinq) {
3902                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3903                         reg_id = 2;
3904         }
3905
3906         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3907                                           &reg_r, NULL);
3908         if (ret != I40E_SUCCESS) {
3909                 PMD_DRV_LOG(ERR,
3910                            "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3911                            reg_id);
3912                 return -EIO;
3913         }
3914         PMD_DRV_LOG(DEBUG,
3915                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3916                     reg_id, reg_r);
3917
3918         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3919         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3920         if (reg_r == reg_w) {
3921                 PMD_DRV_LOG(DEBUG, "No need to write");
3922                 return 0;
3923         }
3924
3925         ret = i40e_aq_debug_write_global_register(hw,
3926                                            I40E_GL_SWT_L2TAGCTRL(reg_id),
3927                                            reg_w, NULL);
3928         if (ret != I40E_SUCCESS) {
3929                 PMD_DRV_LOG(ERR,
3930                             "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3931                             reg_id);
3932                 return -EIO;
3933         }
3934         PMD_DRV_LOG(DEBUG,
3935                     "Global register 0x%08x is changed with value 0x%08x",
3936                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3937
3938         return 0;
3939 }
3940
3941 static int
3942 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3943                    enum rte_vlan_type vlan_type,
3944                    uint16_t tpid)
3945 {
3946         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3947         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3948         int qinq = dev->data->dev_conf.rxmode.offloads &
3949                    DEV_RX_OFFLOAD_VLAN_EXTEND;
3950         int ret = 0;
3951
3952         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3953              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3954             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3955                 PMD_DRV_LOG(ERR,
3956                             "Unsupported vlan type.");
3957                 return -EINVAL;
3958         }
3959
3960         if (pf->support_multi_driver) {
3961                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3962                 return -ENOTSUP;
3963         }
3964
3965         /* 802.1ad frames ability is added in NVM API 1.7*/
3966         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3967                 if (qinq) {
3968                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3969                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3970                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3971                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3972                 } else {
3973                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3974                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3975                 }
3976                 ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3977                 if (ret != I40E_SUCCESS) {
3978                         PMD_DRV_LOG(ERR,
3979                                     "Set switch config failed aq_err: %d",
3980                                     hw->aq.asq_last_status);
3981                         ret = -EIO;
3982                 }
3983         } else
3984                 /* If NVM API < 1.7, keep the register setting */
3985                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3986                                                       tpid, qinq);
3987
3988         return ret;
3989 }
3990
3991 /* Configure outer vlan stripping on or off in QinQ mode */
3992 static int
3993 i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
3994 {
3995         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3996         int ret = I40E_SUCCESS;
3997         uint32_t reg;
3998
3999         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
4000                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
4001                 return -EINVAL;
4002         }
4003
4004         /* Configure for outer VLAN RX stripping */
4005         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
4006
4007         if (on)
4008                 reg |= I40E_VSI_TSR_QINQ_STRIP;
4009         else
4010                 reg &= ~I40E_VSI_TSR_QINQ_STRIP;
4011
4012         ret = i40e_aq_debug_write_register(hw,
4013                                                    I40E_VSI_TSR(vsi->vsi_id),
4014                                                    reg, NULL);
4015         if (ret < 0) {
4016                 PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
4017                                     vsi->vsi_id);
4018                 return I40E_ERR_CONFIG;
4019         }
4020
4021         return ret;
4022 }
4023
4024 static int
4025 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4026 {
4027         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4028         struct i40e_vsi *vsi = pf->main_vsi;
4029         struct rte_eth_rxmode *rxmode;
4030
4031         rxmode = &dev->data->dev_conf.rxmode;
4032         if (mask & ETH_VLAN_FILTER_MASK) {
4033                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4034                         i40e_vsi_config_vlan_filter(vsi, TRUE);
4035                 else
4036                         i40e_vsi_config_vlan_filter(vsi, FALSE);
4037         }
4038
4039         if (mask & ETH_VLAN_STRIP_MASK) {
4040                 /* Enable or disable VLAN stripping */
4041                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4042                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
4043                 else
4044                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
4045         }
4046
4047         if (mask & ETH_VLAN_EXTEND_MASK) {
4048                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
4049                         i40e_vsi_config_double_vlan(vsi, TRUE);
4050                         /* Set global registers with default ethertype. */
4051                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
4052                                            RTE_ETHER_TYPE_VLAN);
4053                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
4054                                            RTE_ETHER_TYPE_VLAN);
4055                 }
4056                 else
4057                         i40e_vsi_config_double_vlan(vsi, FALSE);
4058         }
4059
4060         if (mask & ETH_QINQ_STRIP_MASK) {
4061                 /* Enable or disable outer VLAN stripping */
4062                 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
4063                         i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
4064                 else
4065                         i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
4066         }
4067
4068         return 0;
4069 }
4070
/* Per-queue VLAN strip control is a no-op in this driver: stripping is
 * configured per-VSI through i40e_vlan_offload_set() instead. The stub
 * exists only to satisfy the ethdev op; it just traces the call.
 */
static void
i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
			  __rte_unused uint16_t queue,
			  __rte_unused int on)
{
	PMD_INIT_FUNC_TRACE();
}
4078
4079 static int
4080 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4081 {
4082         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4083         struct i40e_vsi *vsi = pf->main_vsi;
4084         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
4085         struct i40e_vsi_vlan_pvid_info info;
4086
4087         memset(&info, 0, sizeof(info));
4088         info.on = on;
4089         if (info.on)
4090                 info.config.pvid = pvid;
4091         else {
4092                 info.config.reject.tagged =
4093                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
4094                 info.config.reject.untagged =
4095                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
4096         }
4097
4098         return i40e_vsi_vlan_pvid_set(vsi, &info);
4099 }
4100
4101 static int
4102 i40e_dev_led_on(struct rte_eth_dev *dev)
4103 {
4104         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4105         uint32_t mode = i40e_led_get(hw);
4106
4107         if (mode == 0)
4108                 i40e_led_set(hw, 0xf, true); /* 0xf means led always true */
4109
4110         return 0;
4111 }
4112
4113 static int
4114 i40e_dev_led_off(struct rte_eth_dev *dev)
4115 {
4116         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4117         uint32_t mode = i40e_led_get(hw);
4118
4119         if (mode != 0)
4120                 i40e_led_set(hw, 0, false);
4121
4122         return 0;
4123 }
4124
4125 static int
4126 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4127 {
4128         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4129         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4130
4131         fc_conf->pause_time = pf->fc_conf.pause_time;
4132
4133         /* read out from register, in case they are modified by other port */
4134         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
4135                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
4136         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
4137                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
4138
4139         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
4140         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
4141
4142          /* Return current mode according to actual setting*/
4143         switch (hw->fc.current_mode) {
4144         case I40E_FC_FULL:
4145                 fc_conf->mode = RTE_FC_FULL;
4146                 break;
4147         case I40E_FC_TX_PAUSE:
4148                 fc_conf->mode = RTE_FC_TX_PAUSE;
4149                 break;
4150         case I40E_FC_RX_PAUSE:
4151                 fc_conf->mode = RTE_FC_RX_PAUSE;
4152                 break;
4153         case I40E_FC_NONE:
4154         default:
4155                 fc_conf->mode = RTE_FC_NONE;
4156         };
4157
4158         return 0;
4159 }
4160
4161 static int
4162 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4163 {
4164         uint32_t mflcn_reg, fctrl_reg, reg;
4165         uint32_t max_high_water;
4166         uint8_t i, aq_failure;
4167         int err;
4168         struct i40e_hw *hw;
4169         struct i40e_pf *pf;
4170         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
4171                 [RTE_FC_NONE] = I40E_FC_NONE,
4172                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
4173                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
4174                 [RTE_FC_FULL] = I40E_FC_FULL
4175         };
4176
4177         /* high_water field in the rte_eth_fc_conf using the kilobytes unit */
4178
4179         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4180         if ((fc_conf->high_water > max_high_water) ||
4181                         (fc_conf->high_water < fc_conf->low_water)) {
4182                 PMD_INIT_LOG(ERR,
4183                         "Invalid high/low water setup value in KB, High_water must be <= %d.",
4184                         max_high_water);
4185                 return -EINVAL;
4186         }
4187
4188         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4189         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4190         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4191
4192         pf->fc_conf.pause_time = fc_conf->pause_time;
4193         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4194         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4195
4196         PMD_INIT_FUNC_TRACE();
4197
4198         /* All the link flow control related enable/disable register
4199          * configuration is handle by the F/W
4200          */
4201         err = i40e_set_fc(hw, &aq_failure, true);
4202         if (err < 0)
4203                 return -ENOSYS;
4204
4205         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4206                 /* Configure flow control refresh threshold,
4207                  * the value for stat_tx_pause_refresh_timer[8]
4208                  * is used for global pause operation.
4209                  */
4210
4211                 I40E_WRITE_REG(hw,
4212                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4213                                pf->fc_conf.pause_time);
4214
4215                 /* configure the timer value included in transmitted pause
4216                  * frame,
4217                  * the value for stat_tx_pause_quanta[8] is used for global
4218                  * pause operation
4219                  */
4220                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4221                                pf->fc_conf.pause_time);
4222
4223                 fctrl_reg = I40E_READ_REG(hw,
4224                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4225
4226                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4227                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4228                 else
4229                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4230
4231                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4232                                fctrl_reg);
4233         } else {
4234                 /* Configure pause time (2 TCs per register) */
4235                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4236                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4237                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4238
4239                 /* Configure flow control refresh threshold value */
4240                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4241                                pf->fc_conf.pause_time / 2);
4242
4243                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4244
4245                 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
4246                  *depending on configuration
4247                  */
4248                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
4249                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4250                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4251                 } else {
4252                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4253                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4254                 }
4255
4256                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4257         }
4258
4259         if (!pf->support_multi_driver) {
4260                 /* config water marker both based on the packets and bytes */
4261                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4262                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4263                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4264                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4265                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4266                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4267                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4268                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4269                                   << I40E_KILOSHIFT);
4270                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4271                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4272                                    << I40E_KILOSHIFT);
4273         } else {
4274                 PMD_DRV_LOG(ERR,
4275                             "Water marker configuration is not supported.");
4276         }
4277
4278         I40E_WRITE_FLUSH(hw);
4279
4280         return 0;
4281 }
4282
/* Priority (per-TC) flow control is not implemented by this PMD;
 * the ethdev callback is provided only to report -ENOSYS to callers.
 */
static int
i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
                            __rte_unused struct rte_eth_pfc_conf *pfc_conf)
{
        PMD_INIT_FUNC_TRACE();

        return -ENOSYS;
}
4291
4292 /* Add a MAC address, and update filters */
4293 static int
4294 i40e_macaddr_add(struct rte_eth_dev *dev,
4295                  struct rte_ether_addr *mac_addr,
4296                  __rte_unused uint32_t index,
4297                  uint32_t pool)
4298 {
4299         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4300         struct i40e_mac_filter_info mac_filter;
4301         struct i40e_vsi *vsi;
4302         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4303         int ret;
4304
4305         /* If VMDQ not enabled or configured, return */
4306         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4307                           !pf->nb_cfg_vmdq_vsi)) {
4308                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4309                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4310                         pool);
4311                 return -ENOTSUP;
4312         }
4313
4314         if (pool > pf->nb_cfg_vmdq_vsi) {
4315                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4316                                 pool, pf->nb_cfg_vmdq_vsi);
4317                 return -EINVAL;
4318         }
4319
4320         rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4321         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4322                 mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
4323         else
4324                 mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
4325
4326         if (pool == 0)
4327                 vsi = pf->main_vsi;
4328         else
4329                 vsi = pf->vmdq[pool - 1].vsi;
4330
4331         ret = i40e_vsi_add_mac(vsi, &mac_filter);
4332         if (ret != I40E_SUCCESS) {
4333                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4334                 return -ENODEV;
4335         }
4336         return 0;
4337 }
4338
4339 /* Remove a MAC address, and update filters */
4340 static void
4341 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4342 {
4343         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4344         struct i40e_vsi *vsi;
4345         struct rte_eth_dev_data *data = dev->data;
4346         struct rte_ether_addr *macaddr;
4347         int ret;
4348         uint32_t i;
4349         uint64_t pool_sel;
4350
4351         macaddr = &(data->mac_addrs[index]);
4352
4353         pool_sel = dev->data->mac_pool_sel[index];
4354
4355         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4356                 if (pool_sel & (1ULL << i)) {
4357                         if (i == 0)
4358                                 vsi = pf->main_vsi;
4359                         else {
4360                                 /* No VMDQ pool enabled or configured */
4361                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
4362                                         (i > pf->nb_cfg_vmdq_vsi)) {
4363                                         PMD_DRV_LOG(ERR,
4364                                                 "No VMDQ pool enabled/configured");
4365                                         return;
4366                                 }
4367                                 vsi = pf->vmdq[i - 1].vsi;
4368                         }
4369                         ret = i40e_vsi_delete_mac(vsi, macaddr);
4370
4371                         if (ret) {
4372                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4373                                 return;
4374                         }
4375                 }
4376         }
4377 }
4378
4379 static int
4380 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4381 {
4382         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4383         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4384         uint32_t reg;
4385         int ret;
4386
4387         if (!lut)
4388                 return -EINVAL;
4389
4390         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4391                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4392                                           vsi->type != I40E_VSI_SRIOV,
4393                                           lut, lut_size);
4394                 if (ret) {
4395                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4396                         return ret;
4397                 }
4398         } else {
4399                 uint32_t *lut_dw = (uint32_t *)lut;
4400                 uint16_t i, lut_size_dw = lut_size / 4;
4401
4402                 if (vsi->type == I40E_VSI_SRIOV) {
4403                         for (i = 0; i <= lut_size_dw; i++) {
4404                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4405                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4406                         }
4407                 } else {
4408                         for (i = 0; i < lut_size_dw; i++)
4409                                 lut_dw[i] = I40E_READ_REG(hw,
4410                                                           I40E_PFQF_HLUT(i));
4411                 }
4412         }
4413
4414         return 0;
4415 }
4416
4417 int
4418 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4419 {
4420         struct i40e_pf *pf;
4421         struct i40e_hw *hw;
4422
4423         if (!vsi || !lut)
4424                 return -EINVAL;
4425
4426         pf = I40E_VSI_TO_PF(vsi);
4427         hw = I40E_VSI_TO_HW(vsi);
4428
4429         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4430                 enum i40e_status_code status;
4431
4432                 status = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4433                                              vsi->type != I40E_VSI_SRIOV,
4434                                              lut, lut_size);
4435                 if (status) {
4436                         PMD_DRV_LOG(ERR,
4437                                     "Failed to update RSS lookup table, error status: %d",
4438                                     status);
4439                         return -EIO;
4440                 }
4441         } else {
4442                 uint32_t *lut_dw = (uint32_t *)lut;
4443                 uint16_t i, lut_size_dw = lut_size / 4;
4444
4445                 if (vsi->type == I40E_VSI_SRIOV) {
4446                         for (i = 0; i < lut_size_dw; i++)
4447                                 I40E_WRITE_REG(
4448                                         hw,
4449                                         I40E_VFQF_HLUT1(i, vsi->user_param),
4450                                         lut_dw[i]);
4451                 } else {
4452                         for (i = 0; i < lut_size_dw; i++)
4453                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4454                                                lut_dw[i]);
4455                 }
4456                 I40E_WRITE_FLUSH(hw);
4457         }
4458
4459         return 0;
4460 }
4461
4462 static int
4463 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4464                          struct rte_eth_rss_reta_entry64 *reta_conf,
4465                          uint16_t reta_size)
4466 {
4467         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4468         uint16_t i, lut_size = pf->hash_lut_size;
4469         uint16_t idx, shift;
4470         uint8_t *lut;
4471         int ret;
4472
4473         if (reta_size != lut_size ||
4474                 reta_size > ETH_RSS_RETA_SIZE_512) {
4475                 PMD_DRV_LOG(ERR,
4476                         "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
4477                         reta_size, lut_size);
4478                 return -EINVAL;
4479         }
4480
4481         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4482         if (!lut) {
4483                 PMD_DRV_LOG(ERR, "No memory can be allocated");
4484                 return -ENOMEM;
4485         }
4486         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4487         if (ret)
4488                 goto out;
4489         for (i = 0; i < reta_size; i++) {
4490                 idx = i / RTE_RETA_GROUP_SIZE;
4491                 shift = i % RTE_RETA_GROUP_SIZE;
4492                 if (reta_conf[idx].mask & (1ULL << shift))
4493                         lut[i] = reta_conf[idx].reta[shift];
4494         }
4495         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4496
4497         pf->adapter->rss_reta_updated = 1;
4498
4499 out:
4500         rte_free(lut);
4501
4502         return ret;
4503 }
4504
4505 static int
4506 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4507                         struct rte_eth_rss_reta_entry64 *reta_conf,
4508                         uint16_t reta_size)
4509 {
4510         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4511         uint16_t i, lut_size = pf->hash_lut_size;
4512         uint16_t idx, shift;
4513         uint8_t *lut;
4514         int ret;
4515
4516         if (reta_size != lut_size ||
4517                 reta_size > ETH_RSS_RETA_SIZE_512) {
4518                 PMD_DRV_LOG(ERR,
4519                         "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
4520                         reta_size, lut_size);
4521                 return -EINVAL;
4522         }
4523
4524         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4525         if (!lut) {
4526                 PMD_DRV_LOG(ERR, "No memory can be allocated");
4527                 return -ENOMEM;
4528         }
4529
4530         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4531         if (ret)
4532                 goto out;
4533         for (i = 0; i < reta_size; i++) {
4534                 idx = i / RTE_RETA_GROUP_SIZE;
4535                 shift = i % RTE_RETA_GROUP_SIZE;
4536                 if (reta_conf[idx].mask & (1ULL << shift))
4537                         reta_conf[idx].reta[shift] = lut[i];
4538         }
4539
4540 out:
4541         rte_free(lut);
4542
4543         return ret;
4544 }
4545
4546 /**
4547  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4548  * @hw:   pointer to the HW structure
4549  * @mem:  pointer to mem struct to fill out
4550  * @size: size of memory requested
4551  * @alignment: what to align the allocation to
4552  **/
4553 enum i40e_status_code
4554 i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
4555                         struct i40e_dma_mem *mem,
4556                         u64 size,
4557                         u32 alignment)
4558 {
4559         static uint64_t i40e_dma_memzone_id;
4560         const struct rte_memzone *mz = NULL;
4561         char z_name[RTE_MEMZONE_NAMESIZE];
4562
4563         if (!mem)
4564                 return I40E_ERR_PARAM;
4565
4566         snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
4567                 __atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
4568         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4569                         RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4570         if (!mz)
4571                 return I40E_ERR_NO_MEMORY;
4572
4573         mem->size = size;
4574         mem->va = mz->addr;
4575         mem->pa = mz->iova;
4576         mem->zone = (const void *)mz;
4577         PMD_DRV_LOG(DEBUG,
4578                 "memzone %s allocated with physical address: %"PRIu64,
4579                 mz->name, mem->pa);
4580
4581         return I40E_SUCCESS;
4582 }
4583
4584 /**
4585  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4586  * @hw:   pointer to the HW structure
4587  * @mem:  ptr to mem struct to free
4588  **/
4589 enum i40e_status_code
4590 i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
4591                     struct i40e_dma_mem *mem)
4592 {
4593         if (!mem)
4594                 return I40E_ERR_PARAM;
4595
4596         PMD_DRV_LOG(DEBUG,
4597                 "memzone %s to be freed with physical address: %"PRIu64,
4598                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4599         rte_memzone_free((const struct rte_memzone *)mem->zone);
4600         mem->zone = NULL;
4601         mem->va = NULL;
4602         mem->pa = (u64)0;
4603
4604         return I40E_SUCCESS;
4605 }
4606
4607 /**
4608  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4609  * @hw:   pointer to the HW structure
4610  * @mem:  pointer to mem struct to fill out
4611  * @size: size of memory requested
4612  **/
4613 enum i40e_status_code
4614 i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
4615                          struct i40e_virt_mem *mem,
4616                          u32 size)
4617 {
4618         if (!mem)
4619                 return I40E_ERR_PARAM;
4620
4621         mem->size = size;
4622         mem->va = rte_zmalloc("i40e", size, 0);
4623
4624         if (mem->va)
4625                 return I40E_SUCCESS;
4626         else
4627                 return I40E_ERR_NO_MEMORY;
4628 }
4629
/**
 * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
 * @hw:   pointer to the HW structure
 * @mem:  pointer to mem struct to free
 *
 * Frees the buffer allocated by i40e_allocate_virt_mem_d() and clears the
 * stored pointer so a later double-free becomes a harmless free(NULL).
 **/
enum i40e_status_code
i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
                     struct i40e_virt_mem *mem)
{
        if (!mem)
                return I40E_ERR_PARAM;

        rte_free(mem->va);
        mem->va = NULL;

        return I40E_SUCCESS;
}
4647
/* OS-abstraction shim for the base driver: initialize a spinlock. */
void
i40e_init_spinlock_d(struct i40e_spinlock *sp)
{
        rte_spinlock_init(&sp->spinlock);
}
4653
/* OS-abstraction shim for the base driver: take a spinlock (busy-waits). */
void
i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
{
        rte_spinlock_lock(&sp->spinlock);
}
4659
/* OS-abstraction shim for the base driver: release a spinlock. */
void
i40e_release_spinlock_d(struct i40e_spinlock *sp)
{
        rte_spinlock_unlock(&sp->spinlock);
}
4665
4666 void
4667 i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
4668 {
4669         return;
4670 }
4671
4672 /**
4673  * Get the hardware capabilities, which will be parsed
4674  * and saved into struct i40e_hw.
4675  */
4676 static int
4677 i40e_get_cap(struct i40e_hw *hw)
4678 {
4679         struct i40e_aqc_list_capabilities_element_resp *buf;
4680         uint16_t len, size = 0;
4681         int ret;
4682
4683         /* Calculate a huge enough buff for saving response data temporarily */
4684         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4685                                                 I40E_MAX_CAP_ELE_NUM;
4686         buf = rte_zmalloc("i40e", len, 0);
4687         if (!buf) {
4688                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4689                 return I40E_ERR_NO_MEMORY;
4690         }
4691
4692         /* Get, parse the capabilities and save it to hw */
4693         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4694                         i40e_aqc_opc_list_func_capabilities, NULL);
4695         if (ret != I40E_SUCCESS)
4696                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4697
4698         /* Free the temporary buffer after being used */
4699         rte_free(buf);
4700
4701         return ret;
4702 }
4703
4704 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4705
/* Kvargs handler for ETH_I40E_QUEUE_NUM_PER_VF_ARG.
 *
 * Parses @value as an unsigned integer and, when it is a power of two no
 * larger than I40E_MAX_QP_NUM_PER_VF, stores it in pf->vf_nb_qp_max.
 * Returns -EINVAL only for unparseable input; an out-of-range number is
 * merely warned about (returning 0 lets a later occurrence of the same
 * key still be processed).
 */
static int i40e_pf_parse_vf_queue_number_handler(const char *key,
                const char *value,
                void *opaque)
{
        struct i40e_pf *pf;
        unsigned long num;
        char *end;

        pf = (struct i40e_pf *)opaque;
        RTE_SET_USED(key);

        /* errno must be cleared to detect strtoul range errors reliably */
        errno = 0;
        num = strtoul(value, &end, 0);
        if (errno != 0 || end == value || *end != 0) {
                PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is "
                            "kept the value = %hu", value, pf->vf_nb_qp_max);
                return -(EINVAL);
        }

        if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
                pf->vf_nb_qp_max = (uint16_t)num;
        else
                /* here return 0 to make next valid same argument work */
                PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu, it must be "
                            "power of 2 and equal or less than 16 !, Now it is "
                            "kept the value = %hu", num, pf->vf_nb_qp_max);

        return 0;
}
4735
4736 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4737 {
4738         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4739         struct rte_kvargs *kvlist;
4740         int kvargs_count;
4741
4742         /* set default queue number per VF as 4 */
4743         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4744
4745         if (dev->device->devargs == NULL)
4746                 return 0;
4747
4748         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4749         if (kvlist == NULL)
4750                 return -(EINVAL);
4751
4752         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4753         if (!kvargs_count) {
4754                 rte_kvargs_free(kvlist);
4755                 return 0;
4756         }
4757
4758         if (kvargs_count > 1)
4759                 PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
4760                             "the first invalid or last valid one is used !",
4761                             ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4762
4763         rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4764                            i40e_pf_parse_vf_queue_number_handler, pf);
4765
4766         rte_kvargs_free(kvlist);
4767
4768         return 0;
4769 }
4770
/**
 * Partition the PF's hardware queue-pair and VSI budget among its
 * consumers in a fixed order: flow director, the LAN (main) VSI, SR-IOV
 * VFs, then VMDq pools (which take whatever remains). Also seeds the
 * default link flow control parameters.
 *
 * Returns 0 on success, -EINVAL when SR-IOV is requested without HW
 * support or when the resulting demand exceeds the HW capability.
 */
static int
i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        /* Running totals of queue pairs and VSIs handed out so far */
        uint16_t qp_count = 0, vsi_count = 0;

        if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
                PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
                return -EINVAL;
        }

        /* Parse devargs for the per-VF queue count (default 4) */
        i40e_pf_config_vf_rxq_number(dev);

        /* Add the parameter init for LFC */
        pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
        pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
        pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;

        pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
        pf->max_num_vsi = hw->func_caps.num_vsis;
        pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
        pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;

        /* FDir queue/VSI allocation */
        pf->fdir_qp_offset = 0;
        if (hw->func_caps.fd) {
                pf->flags |= I40E_FLAG_FDIR;
                pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
        } else {
                pf->fdir_nb_qps = 0;
        }
        qp_count += pf->fdir_nb_qps;
        vsi_count += 1;

        /* LAN queue/VSI allocation: without RSS a single queue suffices */
        pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
        if (!hw->func_caps.rss) {
                pf->lan_nb_qps = 1;
        } else {
                pf->flags |= I40E_FLAG_RSS;
                if (hw->mac.type == I40E_MAC_X722)
                        pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
                pf->lan_nb_qps = pf->lan_nb_qp_max;
        }
        qp_count += pf->lan_nb_qps;
        vsi_count += 1;

        /* VF queue/VSI allocation */
        pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
        if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
                pf->flags |= I40E_FLAG_SRIOV;
                pf->vf_nb_qps = pf->vf_nb_qp_max;
                pf->vf_num = pci_dev->max_vfs;
                PMD_DRV_LOG(DEBUG,
                        "%u VF VSIs, %u queues per VF VSI, in total %u queues",
                        pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
        } else {
                pf->vf_nb_qps = 0;
                pf->vf_num = 0;
        }
        qp_count += pf->vf_nb_qps * pf->vf_num;
        vsi_count += pf->vf_num;

        /* VMDq queue/VSI allocation: uses the queues/VSIs still unclaimed */
        pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
        pf->vmdq_nb_qps = 0;
        pf->max_nb_vmdq_vsi = 0;
        if (hw->func_caps.vmdq) {
                if (qp_count < hw->func_caps.num_tx_qp &&
                        vsi_count < hw->func_caps.num_vsis) {
                        pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
                                qp_count) / pf->vmdq_nb_qp_max;

                        /* Limit the maximum number of VMDq vsi to the maximum
                         * ethdev can support
                         */
                        pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
                                hw->func_caps.num_vsis - vsi_count);
                        pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
                                ETH_64_POOLS);
                        if (pf->max_nb_vmdq_vsi) {
                                pf->flags |= I40E_FLAG_VMDQ;
                                pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
                                PMD_DRV_LOG(DEBUG,
                                        "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
                                        pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
                                        pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
                        } else {
                                PMD_DRV_LOG(INFO,
                                        "No enough queues left for VMDq");
                        }
                } else {
                        PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
                }
        }
        qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
        vsi_count += pf->max_nb_vmdq_vsi;

        if (hw->func_caps.dcb)
                pf->flags |= I40E_FLAG_DCB;

        /* Final sanity checks against the hardware budget */
        if (qp_count > hw->func_caps.num_tx_qp) {
                PMD_DRV_LOG(ERR,
                        "Failed to allocate %u queues, which exceeds the hardware maximum %u",
                        qp_count, hw->func_caps.num_tx_qp);
                return -EINVAL;
        }
        if (vsi_count > hw->func_caps.num_vsis) {
                PMD_DRV_LOG(ERR,
                        "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
                        vsi_count, hw->func_caps.num_vsis);
                return -EINVAL;
        }

        return 0;
}
4889
4890 static int
4891 i40e_pf_get_switch_config(struct i40e_pf *pf)
4892 {
4893         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4894         struct i40e_aqc_get_switch_config_resp *switch_config;
4895         struct i40e_aqc_switch_config_element_resp *element;
4896         uint16_t start_seid = 0, num_reported;
4897         int ret;
4898
4899         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4900                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4901         if (!switch_config) {
4902                 PMD_DRV_LOG(ERR, "Failed to allocated memory");
4903                 return -ENOMEM;
4904         }
4905
4906         /* Get the switch configurations */
4907         ret = i40e_aq_get_switch_config(hw, switch_config,
4908                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4909         if (ret != I40E_SUCCESS) {
4910                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4911                 goto fail;
4912         }
4913         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4914         if (num_reported != 1) { /* The number should be 1 */
4915                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4916                 goto fail;
4917         }
4918
4919         /* Parse the switch configuration elements */
4920         element = &(switch_config->element[0]);
4921         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4922                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4923                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4924         } else
4925                 PMD_DRV_LOG(INFO, "Unknown element type");
4926
4927 fail:
4928         rte_free(switch_config);
4929
4930         return ret;
4931 }
4932
4933 static int
4934 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4935                         uint32_t num)
4936 {
4937         struct pool_entry *entry;
4938
4939         if (pool == NULL || num == 0)
4940                 return -EINVAL;
4941
4942         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4943         if (entry == NULL) {
4944                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4945                 return -ENOMEM;
4946         }
4947
4948         /* queue heap initialize */
4949         pool->num_free = num;
4950         pool->num_alloc = 0;
4951         pool->base = base;
4952         LIST_INIT(&pool->alloc_list);
4953         LIST_INIT(&pool->free_list);
4954
4955         /* Initialize element  */
4956         entry->base = 0;
4957         entry->len = num;
4958
4959         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4960         return 0;
4961 }
4962
4963 static void
4964 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4965 {
4966         struct pool_entry *entry, *next_entry;
4967
4968         if (pool == NULL)
4969                 return;
4970
4971         for (entry = LIST_FIRST(&pool->alloc_list);
4972                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4973                         entry = next_entry) {
4974                 LIST_REMOVE(entry, next);
4975                 rte_free(entry);
4976         }
4977
4978         for (entry = LIST_FIRST(&pool->free_list);
4979                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4980                         entry = next_entry) {
4981                 LIST_REMOVE(entry, next);
4982                 rte_free(entry);
4983         }
4984
4985         pool->num_free = 0;
4986         pool->num_alloc = 0;
4987         pool->base = 0;
4988         LIST_INIT(&pool->alloc_list);
4989         LIST_INIT(&pool->free_list);
4990 }
4991
4992 static int
4993 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4994                        uint32_t base)
4995 {
4996         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4997         uint32_t pool_offset;
4998         uint16_t len;
4999         int insert;
5000
5001         if (pool == NULL) {
5002                 PMD_DRV_LOG(ERR, "Invalid parameter");
5003                 return -EINVAL;
5004         }
5005
5006         pool_offset = base - pool->base;
5007         /* Lookup in alloc list */
5008         LIST_FOREACH(entry, &pool->alloc_list, next) {
5009                 if (entry->base == pool_offset) {
5010                         valid_entry = entry;
5011                         LIST_REMOVE(entry, next);
5012                         break;
5013                 }
5014         }
5015
5016         /* Not find, return */
5017         if (valid_entry == NULL) {
5018                 PMD_DRV_LOG(ERR, "Failed to find entry");
5019                 return -EINVAL;
5020         }
5021
5022         /**
5023          * Found it, move it to free list  and try to merge.
5024          * In order to make merge easier, always sort it by qbase.
5025          * Find adjacent prev and last entries.
5026          */
5027         prev = next = NULL;
5028         LIST_FOREACH(entry, &pool->free_list, next) {
5029                 if (entry->base > valid_entry->base) {
5030                         next = entry;
5031                         break;
5032                 }
5033                 prev = entry;
5034         }
5035
5036         insert = 0;
5037         len = valid_entry->len;
5038         /* Try to merge with next one*/
5039         if (next != NULL) {
5040                 /* Merge with next one */
5041                 if (valid_entry->base + len == next->base) {
5042                         next->base = valid_entry->base;
5043                         next->len += len;
5044                         rte_free(valid_entry);
5045                         valid_entry = next;
5046                         insert = 1;
5047                 }
5048         }
5049
5050         if (prev != NULL) {
5051                 /* Merge with previous one */
5052                 if (prev->base + prev->len == valid_entry->base) {
5053                         prev->len += len;
5054                         /* If it merge with next one, remove next node */
5055                         if (insert == 1) {
5056                                 LIST_REMOVE(valid_entry, next);
5057                                 rte_free(valid_entry);
5058                                 valid_entry = NULL;
5059                         } else {
5060                                 rte_free(valid_entry);
5061                                 valid_entry = NULL;
5062                                 insert = 1;
5063                         }
5064                 }
5065         }
5066
5067         /* Not find any entry to merge, insert */
5068         if (insert == 0) {
5069                 if (prev != NULL)
5070                         LIST_INSERT_AFTER(prev, valid_entry, next);
5071                 else if (next != NULL)
5072                         LIST_INSERT_BEFORE(next, valid_entry, next);
5073                 else /* It's empty list, insert to head */
5074                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5075         }
5076
5077         pool->num_free += len;
5078         pool->num_alloc -= len;
5079
5080         return 0;
5081 }
5082
5083 static int
5084 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5085                        uint16_t num)
5086 {
5087         struct pool_entry *entry, *valid_entry;
5088
5089         if (pool == NULL || num == 0) {
5090                 PMD_DRV_LOG(ERR, "Invalid parameter");
5091                 return -EINVAL;
5092         }
5093
5094         if (pool->num_free < num) {
5095                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
5096                             num, pool->num_free);
5097                 return -ENOMEM;
5098         }
5099
5100         valid_entry = NULL;
5101         /* Lookup  in free list and find most fit one */
5102         LIST_FOREACH(entry, &pool->free_list, next) {
5103                 if (entry->len >= num) {
5104                         /* Find best one */
5105                         if (entry->len == num) {
5106                                 valid_entry = entry;
5107                                 break;
5108                         }
5109                         if (valid_entry == NULL || valid_entry->len > entry->len)
5110                                 valid_entry = entry;
5111                 }
5112         }
5113
5114         /* Not find one to satisfy the request, return */
5115         if (valid_entry == NULL) {
5116                 PMD_DRV_LOG(ERR, "No valid entry found");
5117                 return -ENOMEM;
5118         }
5119         /**
5120          * The entry have equal queue number as requested,
5121          * remove it from alloc_list.
5122          */
5123         if (valid_entry->len == num) {
5124                 LIST_REMOVE(valid_entry, next);
5125         } else {
5126                 /**
5127                  * The entry have more numbers than requested,
5128                  * create a new entry for alloc_list and minus its
5129                  * queue base and number in free_list.
5130                  */
5131                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5132                 if (entry == NULL) {
5133                         PMD_DRV_LOG(ERR,
5134                                 "Failed to allocate memory for resource pool");
5135                         return -ENOMEM;
5136                 }
5137                 entry->base = valid_entry->base;
5138                 entry->len = num;
5139                 valid_entry->base += num;
5140                 valid_entry->len -= num;
5141                 valid_entry = entry;
5142         }
5143
5144         /* Insert it into alloc list, not sorted */
5145         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5146
5147         pool->num_free -= valid_entry->len;
5148         pool->num_alloc += valid_entry->len;
5149
5150         return valid_entry->base + pool->base;
5151 }
5152
5153 /**
5154  * bitmap_is_subset - Check whether src2 is subset of src1
5155  **/
5156 static inline int
5157 bitmap_is_subset(uint8_t src1, uint8_t src2)
5158 {
5159         return !((src1 ^ src2) & src2);
5160 }
5161
5162 static enum i40e_status_code
5163 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5164 {
5165         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5166
5167         /* If DCB is not supported, only default TC is supported */
5168         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5169                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
5170                 return I40E_NOT_SUPPORTED;
5171         }
5172
5173         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
5174                 PMD_DRV_LOG(ERR,
5175                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
5176                         hw->func_caps.enabled_tcmap, enabled_tcmap);
5177                 return I40E_NOT_SUPPORTED;
5178         }
5179         return I40E_SUCCESS;
5180 }
5181
5182 int
5183 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5184                                 struct i40e_vsi_vlan_pvid_info *info)
5185 {
5186         struct i40e_hw *hw;
5187         struct i40e_vsi_context ctxt;
5188         uint8_t vlan_flags = 0;
5189         int ret;
5190
5191         if (vsi == NULL || info == NULL) {
5192                 PMD_DRV_LOG(ERR, "invalid parameters");
5193                 return I40E_ERR_PARAM;
5194         }
5195
5196         if (info->on) {
5197                 vsi->info.pvid = info->config.pvid;
5198                 /**
5199                  * If insert pvid is enabled, only tagged pkts are
5200                  * allowed to be sent out.
5201                  */
5202                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5203                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5204         } else {
5205                 vsi->info.pvid = 0;
5206                 if (info->config.reject.tagged == 0)
5207                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5208
5209                 if (info->config.reject.untagged == 0)
5210                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5211         }
5212         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5213                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
5214         vsi->info.port_vlan_flags |= vlan_flags;
5215         vsi->info.valid_sections =
5216                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5217         memset(&ctxt, 0, sizeof(ctxt));
5218         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5219         ctxt.seid = vsi->seid;
5220
5221         hw = I40E_VSI_TO_HW(vsi);
5222         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5223         if (ret != I40E_SUCCESS)
5224                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
5225
5226         return ret;
5227 }
5228
5229 static int
5230 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5231 {
5232         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5233         int i, ret;
5234         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5235
5236         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5237         if (ret != I40E_SUCCESS)
5238                 return ret;
5239
5240         if (!vsi->seid) {
5241                 PMD_DRV_LOG(ERR, "seid not valid");
5242                 return -EINVAL;
5243         }
5244
5245         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5246         tc_bw_data.tc_valid_bits = enabled_tcmap;
5247         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5248                 tc_bw_data.tc_bw_credits[i] =
5249                         (enabled_tcmap & (1 << i)) ? 1 : 0;
5250
5251         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5252         if (ret != I40E_SUCCESS) {
5253                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5254                 return ret;
5255         }
5256
5257         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5258                                         sizeof(vsi->info.qs_handle));
5259         return I40E_SUCCESS;
5260 }
5261
/* Fill the TC <-> queue mapping section of a VSI context.
 *
 * @vsi: VSI whose nb_qps/base_queue describe the queue range to map
 * @info: VSI properties to fill (tc_mapping, queue_mapping, flags)
 * @enabled_tcmap: bitmap of traffic classes to enable (bit i -> TC i)
 *
 * Returns I40E_SUCCESS, or the error from validate_tcmap_parameter().
 * Note: may shrink vsi->nb_qps so the queues divide evenly among TCs.
 */
static enum i40e_status_code
i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
				 struct i40e_aqc_vsi_properties_data *info,
				 uint8_t enabled_tcmap)
{
	enum i40e_status_code ret;
	int i, total_tc = 0;
	uint16_t qpnum_per_tc, bsf, qp_idx;

	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
	if (ret != I40E_SUCCESS)
		return ret;

	/* Count enabled TCs; an empty map is treated as a single TC */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		if (enabled_tcmap & (1 << i))
			total_tc++;
	if (total_tc == 0)
		total_tc = 1;
	vsi->enabled_tc = enabled_tcmap;

	/* Number of queues per enabled TC */
	qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
	qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
	/* rte_bsf32 gives the lowest set bit; this equals log2(qpnum_per_tc)
	 * assuming i40e_align_floor yields a power of two — the encoded
	 * queue-count field below is expressed as that exponent.
	 */
	bsf = rte_bsf32(qpnum_per_tc);

	/* Adjust the queue number to actual queues that can be applied */
	if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
		vsi->nb_qps = qpnum_per_tc * total_tc;

	/**
	 * Configure TC and queue mapping parameters, for enabled TC,
	 * allocate qpnum_per_tc queues to this traffic. For disabled TC,
	 * default queue will serve it.
	 */
	qp_idx = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & (1 << i)) {
			/* Pack queue offset and count-exponent per AQ layout */
			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
			qp_idx += qpnum_per_tc;
		} else
			info->tc_mapping[i] = 0;
	}

	/* Associate queue number with VSI */
	if (vsi->type == I40E_VSI_SRIOV) {
		/* SRIOV VSIs list each queue explicitly (non-contiguous) */
		info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->nb_qps; i++)
			info->queue_mapping[i] =
				rte_cpu_to_le_16(vsi->base_queue + i);
	} else {
		/* Other VSIs map one contiguous range from base_queue */
		info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	}
	info->valid_sections |=
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);

	return I40E_SUCCESS;
}
5324
5325 static int
5326 i40e_veb_release(struct i40e_veb *veb)
5327 {
5328         struct i40e_vsi *vsi;
5329         struct i40e_hw *hw;
5330
5331         if (veb == NULL)
5332                 return -EINVAL;
5333
5334         if (!TAILQ_EMPTY(&veb->head)) {
5335                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5336                 return -EACCES;
5337         }
5338         /* associate_vsi field is NULL for floating VEB */
5339         if (veb->associate_vsi != NULL) {
5340                 vsi = veb->associate_vsi;
5341                 hw = I40E_VSI_TO_HW(vsi);
5342
5343                 vsi->uplink_seid = veb->uplink_seid;
5344                 vsi->veb = NULL;
5345         } else {
5346                 veb->associate_pf->main_vsi->floating_veb = NULL;
5347                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5348         }
5349
5350         i40e_aq_delete_element(hw, veb->seid, NULL);
5351         rte_free(veb);
5352         return I40E_SUCCESS;
5353 }
5354
5355 /* Setup a veb */
5356 static struct i40e_veb *
5357 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5358 {
5359         struct i40e_veb *veb;
5360         int ret;
5361         struct i40e_hw *hw;
5362
5363         if (pf == NULL) {
5364                 PMD_DRV_LOG(ERR,
5365                             "veb setup failed, associated PF shouldn't null");
5366                 return NULL;
5367         }
5368         hw = I40E_PF_TO_HW(pf);
5369
5370         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5371         if (!veb) {
5372                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5373                 goto fail;
5374         }
5375
5376         veb->associate_vsi = vsi;
5377         veb->associate_pf = pf;
5378         TAILQ_INIT(&veb->head);
5379         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5380
5381         /* create floating veb if vsi is NULL */
5382         if (vsi != NULL) {
5383                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5384                                       I40E_DEFAULT_TCMAP, false,
5385                                       &veb->seid, false, NULL);
5386         } else {
5387                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5388                                       true, &veb->seid, false, NULL);
5389         }
5390
5391         if (ret != I40E_SUCCESS) {
5392                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5393                             hw->aq.asq_last_status);
5394                 goto fail;
5395         }
5396         veb->enabled_tc = I40E_DEFAULT_TCMAP;
5397
5398         /* get statistics index */
5399         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5400                                 &veb->stats_idx, NULL, NULL, NULL);
5401         if (ret != I40E_SUCCESS) {
5402                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5403                             hw->aq.asq_last_status);
5404                 goto fail;
5405         }
5406         /* Get VEB bandwidth, to be implemented */
5407         /* Now associated vsi binding to the VEB, set uplink to this VEB */
5408         if (vsi)
5409                 vsi->uplink_seid = veb->seid;
5410
5411         return veb;
5412 fail:
5413         rte_free(veb);
5414         return NULL;
5415 }
5416
/* Release a VSI and everything attached to it.
 *
 * Recursively releases the child VSIs hanging off this VSI's VEB and
 * floating VEB, frees all MAC filters, detaches the VSI from its
 * parent's sibling list, deletes the switch element from firmware,
 * and returns the queue/MSI-X resources to the PF pools.
 *
 * Returns I40E_SUCCESS on success; -EFAULT, -1 or I40E_ERR_PARAM on
 * the various failure paths.
 */
int
i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	struct i40e_vsi_list *vsi_list;
	void *temp;
	int ret;
	struct i40e_mac_filter *f;
	uint16_t user_param;

	if (!vsi)
		return I40E_SUCCESS;

	if (!vsi->adapter)
		return -EFAULT;

	/* Cache user_param: it selects the floating-VEB branches below */
	user_param = vsi->user_param;

	pf = I40E_VSI_TO_PF(vsi);
	hw = I40E_VSI_TO_HW(vsi);

	/* VSI has child to attach, release child first */
	if (vsi->veb) {
		TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
				return -1;
		}
		i40e_veb_release(vsi->veb);
	}

	/* Children attached to the floating VEB are released the same way;
	 * the floating VEB itself is owned by the main VSI, not freed here.
	 */
	if (vsi->floating_veb) {
		TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
				return -1;
		}
	}

	/* Remove all macvlan filters of the VSI */
	i40e_vsi_remove_all_macvlan_filter(vsi);
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
		rte_free(f);

	/* Non-main VSIs attached to a regular VEB: unlink and delete the
	 * switch element (SRIOV VSIs on a floating VEB are handled below).
	 */
	if (vsi->type != I40E_VSI_MAIN &&
	    ((vsi->type != I40E_VSI_SRIOV) ||
	    !pf->floating_veb_list[user_param])) {
		/* Remove vsi from parent's sibling list */
		if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
			return I40E_ERR_PARAM;
		}
		TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
				&vsi->sib_vsi_list, list);

		/* Remove all switch element of the VSI */
		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(ERR, "Failed to delete element");
	}

	/* SRIOV VSI attached to a floating VEB: same unlink/delete, but
	 * through the parent's floating VEB list.
	 */
	if ((vsi->type == I40E_VSI_SRIOV) &&
	    pf->floating_veb_list[user_param]) {
		/* Remove vsi from parent's sibling list */
		if (vsi->parent_vsi == NULL ||
		    vsi->parent_vsi->floating_veb == NULL) {
			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
			return I40E_ERR_PARAM;
		}
		TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
			     &vsi->sib_vsi_list, list);

		/* Remove all switch element of the VSI */
		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(ERR, "Failed to delete element");
	}

	/* Give the queue range back to the PF pool */
	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);

	/* SRIOV VSIs use VF-range interrupts, not the PF MSI-X pool */
	if (vsi->type != I40E_VSI_SRIOV)
		i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
	rte_free(vsi);

	return I40E_SUCCESS;
}
5502
5503 static int
5504 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5505 {
5506         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5507         struct i40e_aqc_remove_macvlan_element_data def_filter;
5508         struct i40e_mac_filter_info filter;
5509         int ret;
5510
5511         if (vsi->type != I40E_VSI_MAIN)
5512                 return I40E_ERR_CONFIG;
5513         memset(&def_filter, 0, sizeof(def_filter));
5514         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5515                                         ETH_ADDR_LEN);
5516         def_filter.vlan_tag = 0;
5517         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5518                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5519         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5520         if (ret != I40E_SUCCESS) {
5521                 struct i40e_mac_filter *f;
5522                 struct rte_ether_addr *mac;
5523
5524                 PMD_DRV_LOG(DEBUG,
5525                             "Cannot remove the default macvlan filter");
5526                 /* It needs to add the permanent mac into mac list */
5527                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5528                 if (f == NULL) {
5529                         PMD_DRV_LOG(ERR, "failed to allocate memory");
5530                         return I40E_ERR_NO_MEMORY;
5531                 }
5532                 mac = &f->mac_info.mac_addr;
5533                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5534                                 ETH_ADDR_LEN);
5535                 f->mac_info.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5536                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5537                 vsi->mac_num++;
5538
5539                 return ret;
5540         }
5541         rte_memcpy(&filter.mac_addr,
5542                 (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5543         filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5544         return i40e_vsi_add_mac(vsi, &filter);
5545 }
5546
5547 /*
5548  * i40e_vsi_get_bw_config - Query VSI BW Information
5549  * @vsi: the VSI to be queried
5550  *
5551  * Returns 0 on success, negative value on failure
5552  */
5553 static enum i40e_status_code
5554 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5555 {
5556         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5557         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5558         struct i40e_hw *hw = &vsi->adapter->hw;
5559         i40e_status ret;
5560         int i;
5561         uint32_t bw_max;
5562
5563         memset(&bw_config, 0, sizeof(bw_config));
5564         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5565         if (ret != I40E_SUCCESS) {
5566                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5567                             hw->aq.asq_last_status);
5568                 return ret;
5569         }
5570
5571         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5572         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5573                                         &ets_sla_config, NULL);
5574         if (ret != I40E_SUCCESS) {
5575                 PMD_DRV_LOG(ERR,
5576                         "VSI failed to get TC bandwdith configuration %u",
5577                         hw->aq.asq_last_status);
5578                 return ret;
5579         }
5580
5581         /* store and print out BW info */
5582         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5583         vsi->bw_info.bw_max = bw_config.max_bw;
5584         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5585         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5586         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5587                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5588                      I40E_16_BIT_WIDTH);
5589         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5590                 vsi->bw_info.bw_ets_share_credits[i] =
5591                                 ets_sla_config.share_credits[i];
5592                 vsi->bw_info.bw_ets_credits[i] =
5593                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5594                 /* 4 bits per TC, 4th bit is reserved */
5595                 vsi->bw_info.bw_ets_max[i] =
5596                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5597                                   RTE_LEN2MASK(3, uint8_t));
5598                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5599                             vsi->bw_info.bw_ets_share_credits[i]);
5600                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5601                             vsi->bw_info.bw_ets_credits[i]);
5602                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5603                             vsi->bw_info.bw_ets_max[i]);
5604         }
5605
5606         return I40E_SUCCESS;
5607 }
5608
5609 /* i40e_enable_pf_lb
5610  * @pf: pointer to the pf structure
5611  *
5612  * allow loopback on pf
5613  */
5614 static inline void
5615 i40e_enable_pf_lb(struct i40e_pf *pf)
5616 {
5617         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5618         struct i40e_vsi_context ctxt;
5619         int ret;
5620
5621         /* Use the FW API if FW >= v5.0 */
5622         if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5623                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5624                 return;
5625         }
5626
5627         memset(&ctxt, 0, sizeof(ctxt));
5628         ctxt.seid = pf->main_vsi_seid;
5629         ctxt.pf_num = hw->pf_id;
5630         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5631         if (ret) {
5632                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5633                             ret, hw->aq.asq_last_status);
5634                 return;
5635         }
5636         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5637         ctxt.info.valid_sections =
5638                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5639         ctxt.info.switch_id |=
5640                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5641
5642         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5643         if (ret)
5644                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5645                             hw->aq.asq_last_status);
5646 }
5647
5648 /* Setup a VSI */
5649 struct i40e_vsi *
5650 i40e_vsi_setup(struct i40e_pf *pf,
5651                enum i40e_vsi_type type,
5652                struct i40e_vsi *uplink_vsi,
5653                uint16_t user_param)
5654 {
5655         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5656         struct i40e_vsi *vsi;
5657         struct i40e_mac_filter_info filter;
5658         int ret;
5659         struct i40e_vsi_context ctxt;
5660         struct rte_ether_addr broadcast =
5661                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5662
5663         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5664             uplink_vsi == NULL) {
5665                 PMD_DRV_LOG(ERR,
5666                         "VSI setup failed, VSI link shouldn't be NULL");
5667                 return NULL;
5668         }
5669
5670         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5671                 PMD_DRV_LOG(ERR,
5672                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5673                 return NULL;
5674         }
5675
5676         /* two situations
5677          * 1.type is not MAIN and uplink vsi is not NULL
5678          * If uplink vsi didn't setup VEB, create one first under veb field
5679          * 2.type is SRIOV and the uplink is NULL
5680          * If floating VEB is NULL, create one veb under floating veb field
5681          */
5682
5683         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5684             uplink_vsi->veb == NULL) {
5685                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5686
5687                 if (uplink_vsi->veb == NULL) {
5688                         PMD_DRV_LOG(ERR, "VEB setup failed");
5689                         return NULL;
5690                 }
5691                 /* set ALLOWLOOPBACk on pf, when veb is created */
5692                 i40e_enable_pf_lb(pf);
5693         }
5694
5695         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5696             pf->main_vsi->floating_veb == NULL) {
5697                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5698
5699                 if (pf->main_vsi->floating_veb == NULL) {
5700                         PMD_DRV_LOG(ERR, "VEB setup failed");
5701                         return NULL;
5702                 }
5703         }
5704
5705         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5706         if (!vsi) {
5707                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5708                 return NULL;
5709         }
5710         TAILQ_INIT(&vsi->mac_list);
5711         vsi->type = type;
5712         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5713         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5714         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5715         vsi->user_param = user_param;
5716         vsi->vlan_anti_spoof_on = 0;
5717         vsi->vlan_filter_on = 0;
5718         /* Allocate queues */
5719         switch (vsi->type) {
5720         case I40E_VSI_MAIN  :
5721                 vsi->nb_qps = pf->lan_nb_qps;
5722                 break;
5723         case I40E_VSI_SRIOV :
5724                 vsi->nb_qps = pf->vf_nb_qps;
5725                 break;
5726         case I40E_VSI_VMDQ2:
5727                 vsi->nb_qps = pf->vmdq_nb_qps;
5728                 break;
5729         case I40E_VSI_FDIR:
5730                 vsi->nb_qps = pf->fdir_nb_qps;
5731                 break;
5732         default:
5733                 goto fail_mem;
5734         }
5735         /*
5736          * The filter status descriptor is reported in rx queue 0,
5737          * while the tx queue for fdir filter programming has no
5738          * such constraints, can be non-zero queues.
5739          * To simplify it, choose FDIR vsi use queue 0 pair.
5740          * To make sure it will use queue 0 pair, queue allocation
5741          * need be done before this function is called
5742          */
5743         if (type != I40E_VSI_FDIR) {
5744                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5745                         if (ret < 0) {
5746                                 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5747                                                 vsi->seid, ret);
5748                                 goto fail_mem;
5749                         }
5750                         vsi->base_queue = ret;
5751         } else
5752                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5753
5754         /* VF has MSIX interrupt in VF range, don't allocate here */
5755         if (type == I40E_VSI_MAIN) {
5756                 if (pf->support_multi_driver) {
5757                         /* If support multi-driver, need to use INT0 instead of
5758                          * allocating from msix pool. The Msix pool is init from
5759                          * INT1, so it's OK just set msix_intr to 0 and nb_msix
5760                          * to 1 without calling i40e_res_pool_alloc.
5761                          */
5762                         vsi->msix_intr = 0;
5763                         vsi->nb_msix = 1;
5764                 } else {
5765                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5766                                                   RTE_MIN(vsi->nb_qps,
5767                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5768                         if (ret < 0) {
5769                                 PMD_DRV_LOG(ERR,
5770                                             "VSI MAIN %d get heap failed %d",
5771                                             vsi->seid, ret);
5772                                 goto fail_queue_alloc;
5773                         }
5774                         vsi->msix_intr = ret;
5775                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5776                                                RTE_MAX_RXTX_INTR_VEC_ID);
5777                 }
5778         } else if (type != I40E_VSI_SRIOV) {
5779                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5780                 if (ret < 0) {
5781                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5782                         if (type != I40E_VSI_FDIR)
5783                                 goto fail_queue_alloc;
5784                         vsi->msix_intr = 0;
5785                         vsi->nb_msix = 0;
5786                 } else {
5787                         vsi->msix_intr = ret;
5788                         vsi->nb_msix = 1;
5789                 }
5790         } else {
5791                 vsi->msix_intr = 0;
5792                 vsi->nb_msix = 0;
5793         }
5794
5795         /* Add VSI */
5796         if (type == I40E_VSI_MAIN) {
5797                 /* For main VSI, no need to add since it's default one */
5798                 vsi->uplink_seid = pf->mac_seid;
5799                 vsi->seid = pf->main_vsi_seid;
5800                 /* Bind queues with specific MSIX interrupt */
5801                 /**
5802                  * Needs 2 interrupt at least, one for misc cause which will
5803                  * enabled from OS side, Another for queues binding the
5804                  * interrupt from device side only.
5805                  */
5806
5807                 /* Get default VSI parameters from hardware */
5808                 memset(&ctxt, 0, sizeof(ctxt));
5809                 ctxt.seid = vsi->seid;
5810                 ctxt.pf_num = hw->pf_id;
5811                 ctxt.uplink_seid = vsi->uplink_seid;
5812                 ctxt.vf_num = 0;
5813                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5814                 if (ret != I40E_SUCCESS) {
5815                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5816                         goto fail_msix_alloc;
5817                 }
5818                 rte_memcpy(&vsi->info, &ctxt.info,
5819                         sizeof(struct i40e_aqc_vsi_properties_data));
5820                 vsi->vsi_id = ctxt.vsi_number;
5821                 vsi->info.valid_sections = 0;
5822
5823                 /* Configure tc, enabled TC0 only */
5824                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5825                         I40E_SUCCESS) {
5826                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5827                         goto fail_msix_alloc;
5828                 }
5829
5830                 /* TC, queue mapping */
5831                 memset(&ctxt, 0, sizeof(ctxt));
5832                 vsi->info.valid_sections |=
5833                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5834                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5835                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5836                 rte_memcpy(&ctxt.info, &vsi->info,
5837                         sizeof(struct i40e_aqc_vsi_properties_data));
5838                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5839                                                 I40E_DEFAULT_TCMAP);
5840                 if (ret != I40E_SUCCESS) {
5841                         PMD_DRV_LOG(ERR,
5842                                 "Failed to configure TC queue mapping");
5843                         goto fail_msix_alloc;
5844                 }
5845                 ctxt.seid = vsi->seid;
5846                 ctxt.pf_num = hw->pf_id;
5847                 ctxt.uplink_seid = vsi->uplink_seid;
5848                 ctxt.vf_num = 0;
5849
5850                 /* Update VSI parameters */
5851                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5852                 if (ret != I40E_SUCCESS) {
5853                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5854                         goto fail_msix_alloc;
5855                 }
5856
5857                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5858                                                 sizeof(vsi->info.tc_mapping));
5859                 rte_memcpy(&vsi->info.queue_mapping,
5860                                 &ctxt.info.queue_mapping,
5861                         sizeof(vsi->info.queue_mapping));
5862                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5863                 vsi->info.valid_sections = 0;
5864
5865                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5866                                 ETH_ADDR_LEN);
5867
5868                 /**
5869                  * Updating default filter settings are necessary to prevent
5870                  * reception of tagged packets.
5871                  * Some old firmware configurations load a default macvlan
5872                  * filter which accepts both tagged and untagged packets.
5873                  * The updating is to use a normal filter instead if needed.
5874                  * For NVM 4.2.2 or after, the updating is not needed anymore.
5875                  * The firmware with correct configurations load the default
5876                  * macvlan filter which is expected and cannot be removed.
5877                  */
5878                 i40e_update_default_filter_setting(vsi);
5879                 i40e_config_qinq(hw, vsi);
5880         } else if (type == I40E_VSI_SRIOV) {
5881                 memset(&ctxt, 0, sizeof(ctxt));
5882                 /**
5883                  * For other VSI, the uplink_seid equals to uplink VSI's
5884                  * uplink_seid since they share same VEB
5885                  */
5886                 if (uplink_vsi == NULL)
5887                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5888                 else
5889                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5890                 ctxt.pf_num = hw->pf_id;
5891                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5892                 ctxt.uplink_seid = vsi->uplink_seid;
5893                 ctxt.connection_type = 0x1;
5894                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5895
5896                 /* Use the VEB configuration if FW >= v5.0 */
5897                 if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
5898                         /* Configure switch ID */
5899                         ctxt.info.valid_sections |=
5900                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5901                         ctxt.info.switch_id =
5902                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5903                 }
5904
5905                 /* Configure port/vlan */
5906                 ctxt.info.valid_sections |=
5907                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5908                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5909                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5910                                                 hw->func_caps.enabled_tcmap);
5911                 if (ret != I40E_SUCCESS) {
5912                         PMD_DRV_LOG(ERR,
5913                                 "Failed to configure TC queue mapping");
5914                         goto fail_msix_alloc;
5915                 }
5916
5917                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5918                 ctxt.info.valid_sections |=
5919                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5920                 /**
5921                  * Since VSI is not created yet, only configure parameter,
5922                  * will add vsi below.
5923                  */
5924
5925                 i40e_config_qinq(hw, vsi);
5926         } else if (type == I40E_VSI_VMDQ2) {
5927                 memset(&ctxt, 0, sizeof(ctxt));
5928                 /*
5929                  * For other VSI, the uplink_seid equals to uplink VSI's
5930                  * uplink_seid since they share same VEB
5931                  */
5932                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5933                 ctxt.pf_num = hw->pf_id;
5934                 ctxt.vf_num = 0;
5935                 ctxt.uplink_seid = vsi->uplink_seid;
5936                 ctxt.connection_type = 0x1;
5937                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5938
5939                 ctxt.info.valid_sections |=
5940                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5941                 /* user_param carries flag to enable loop back */
5942                 if (user_param) {
5943                         ctxt.info.switch_id =
5944                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5945                         ctxt.info.switch_id |=
5946                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5947                 }
5948
5949                 /* Configure port/vlan */
5950                 ctxt.info.valid_sections |=
5951                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5952                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5953                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5954                                                 I40E_DEFAULT_TCMAP);
5955                 if (ret != I40E_SUCCESS) {
5956                         PMD_DRV_LOG(ERR,
5957                                 "Failed to configure TC queue mapping");
5958                         goto fail_msix_alloc;
5959                 }
5960                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5961                 ctxt.info.valid_sections |=
5962                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5963         } else if (type == I40E_VSI_FDIR) {
5964                 memset(&ctxt, 0, sizeof(ctxt));
5965                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5966                 ctxt.pf_num = hw->pf_id;
5967                 ctxt.vf_num = 0;
5968                 ctxt.uplink_seid = vsi->uplink_seid;
5969                 ctxt.connection_type = 0x1;     /* regular data port */
5970                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5971                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5972                                                 I40E_DEFAULT_TCMAP);
5973                 if (ret != I40E_SUCCESS) {
5974                         PMD_DRV_LOG(ERR,
5975                                 "Failed to configure TC queue mapping.");
5976                         goto fail_msix_alloc;
5977                 }
5978                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5979                 ctxt.info.valid_sections |=
5980                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5981         } else {
5982                 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
5983                 goto fail_msix_alloc;
5984         }
5985
5986         if (vsi->type != I40E_VSI_MAIN) {
5987                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5988                 if (ret != I40E_SUCCESS) {
5989                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5990                                     hw->aq.asq_last_status);
5991                         goto fail_msix_alloc;
5992                 }
5993                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5994                 vsi->info.valid_sections = 0;
5995                 vsi->seid = ctxt.seid;
5996                 vsi->vsi_id = ctxt.vsi_number;
5997                 vsi->sib_vsi_list.vsi = vsi;
5998                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5999                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
6000                                           &vsi->sib_vsi_list, list);
6001                 } else {
6002                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
6003                                           &vsi->sib_vsi_list, list);
6004                 }
6005         }
6006
6007         /* MAC/VLAN configuration */
6008         rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
6009         filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
6010
6011         ret = i40e_vsi_add_mac(vsi, &filter);
6012         if (ret != I40E_SUCCESS) {
6013                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
6014                 goto fail_msix_alloc;
6015         }
6016
6017         /* Get VSI BW information */
6018         i40e_vsi_get_bw_config(vsi);
6019         return vsi;
6020 fail_msix_alloc:
6021         i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
6022 fail_queue_alloc:
6023         i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
6024 fail_mem:
6025         rte_free(vsi);
6026         return NULL;
6027 }
6028
6029 /* Configure vlan filter on or off */
6030 int
6031 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
6032 {
6033         int i, num;
6034         struct i40e_mac_filter *f;
6035         void *temp;
6036         struct i40e_mac_filter_info *mac_filter;
6037         enum i40e_mac_filter_type desired_filter;
6038         int ret = I40E_SUCCESS;
6039
6040         if (on) {
6041                 /* Filter to match MAC and VLAN */
6042                 desired_filter = I40E_MACVLAN_PERFECT_MATCH;
6043         } else {
6044                 /* Filter to match only MAC */
6045                 desired_filter = I40E_MAC_PERFECT_MATCH;
6046         }
6047
6048         num = vsi->mac_num;
6049
6050         mac_filter = rte_zmalloc("mac_filter_info_data",
6051                                  num * sizeof(*mac_filter), 0);
6052         if (mac_filter == NULL) {
6053                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6054                 return I40E_ERR_NO_MEMORY;
6055         }
6056
6057         i = 0;
6058
6059         /* Remove all existing mac */
6060         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
6061                 mac_filter[i] = f->mac_info;
6062                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
6063                 if (ret) {
6064                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6065                                     on ? "enable" : "disable");
6066                         goto DONE;
6067                 }
6068                 i++;
6069         }
6070
6071         /* Override with new filter */
6072         for (i = 0; i < num; i++) {
6073                 mac_filter[i].filter_type = desired_filter;
6074                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
6075                 if (ret) {
6076                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6077                                     on ? "enable" : "disable");
6078                         goto DONE;
6079                 }
6080         }
6081
6082 DONE:
6083         rte_free(mac_filter);
6084         return ret;
6085 }
6086
6087 /* Configure vlan stripping on or off */
6088 int
6089 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6090 {
6091         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6092         struct i40e_vsi_context ctxt;
6093         uint8_t vlan_flags;
6094         int ret = I40E_SUCCESS;
6095
6096         /* Check if it has been already on or off */
6097         if (vsi->info.valid_sections &
6098                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6099                 if (on) {
6100                         if ((vsi->info.port_vlan_flags &
6101                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6102                                 return 0; /* already on */
6103                 } else {
6104                         if ((vsi->info.port_vlan_flags &
6105                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6106                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
6107                                 return 0; /* already off */
6108                 }
6109         }
6110
6111         if (on)
6112                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6113         else
6114                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6115         vsi->info.valid_sections =
6116                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6117         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6118         vsi->info.port_vlan_flags |= vlan_flags;
6119         ctxt.seid = vsi->seid;
6120         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6121         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6122         if (ret)
6123                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6124                             on ? "enable" : "disable");
6125
6126         return ret;
6127 }
6128
6129 static int
6130 i40e_dev_init_vlan(struct rte_eth_dev *dev)
6131 {
6132         struct rte_eth_dev_data *data = dev->data;
6133         int ret;
6134         int mask = 0;
6135
6136         /* Apply vlan offload setting */
6137         mask = ETH_VLAN_STRIP_MASK |
6138                ETH_QINQ_STRIP_MASK |
6139                ETH_VLAN_FILTER_MASK |
6140                ETH_VLAN_EXTEND_MASK;
6141         ret = i40e_vlan_offload_set(dev, mask);
6142         if (ret) {
6143                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
6144                 return ret;
6145         }
6146
6147         /* Apply pvid setting */
6148         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6149                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
6150         if (ret)
6151                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
6152
6153         return ret;
6154 }
6155
6156 static int
6157 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6158 {
6159         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6160
6161         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
6162 }
6163
/* Sync the port flow-control registers with the auto-negotiation result.
 *
 * Reads the link status from firmware; when auto-negotiation has
 * completed, derives the RX/TX pause enables from the negotiated pause
 * bits, otherwise leaves rxfc/txfc at 0 (flow control disabled). The
 * register writes at write_reg: are performed on every path, including
 * the error paths, so failure means "disable flow control".
 *
 * Returns I40E_SUCCESS, the AQ error, or I40E_ERR_NOT_READY when
 * auto-negotiation has not completed.
 */
static int
i40e_update_flow_control(struct i40e_hw *hw)
{
#define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
	struct i40e_link_status link_status;
	uint32_t rxfc = 0, txfc = 0, reg;
	uint8_t an_info;
	int ret;

	/* NOTE(review): link_status itself is never read afterwards; the AQ
	 * call presumably refreshes hw->phy.link_info consumed below —
	 * confirm against the base driver.
	 */
	memset(&link_status, 0, sizeof(link_status));
	ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to get link status information");
		goto write_reg; /* Disable flow control */
	}

	an_info = hw->phy.link_info.an_info;
	if (!(an_info & I40E_AQ_AN_COMPLETED)) {
		PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
		ret = I40E_ERR_NOT_READY;
		goto write_reg; /* Disable flow control */
	}
	/**
	 * If link auto negotiation is enabled, flow control needs to
	 * be configured according to it
	 */
	switch (an_info & I40E_LINK_PAUSE_RXTX) {
	case I40E_LINK_PAUSE_RXTX:
		rxfc = 1;
		txfc = 1;
		hw->fc.current_mode = I40E_FC_FULL;
		break;
	case I40E_AQ_LINK_PAUSE_RX:
		rxfc = 1;
		hw->fc.current_mode = I40E_FC_RX_PAUSE;
		break;
	case I40E_AQ_LINK_PAUSE_TX:
		txfc = 1;
		hw->fc.current_mode = I40E_FC_TX_PAUSE;
		break;
	default:
		hw->fc.current_mode = I40E_FC_NONE;
		break;
	}

write_reg:
	/* TX pause: write the transmit flow-control enable field outright */
	I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
		txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
	/* RX pause: read-modify-write only the receive enable bit */
	reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
	reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
	reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
	I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);

	return ret;
}
6219
/* PF setup.
 *
 * One-time PF initialization: clears the statistics baselines, reads the
 * switch configuration, allocates a switch domain, reserves queue pair 0
 * for flow director, creates the main VSI, and programs the global filter
 * control (hash LUT size, ethertype/macvlan filters) and flow control.
 *
 * NOTE(review): the error paths below return without releasing resources
 * acquired earlier in this function (switch domain, FDIR queue pair,
 * main VSI) — confirm whether the caller's teardown covers them.
 */
static int
i40e_pf_setup(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_filter_control_settings settings;
	struct i40e_vsi *vsi;
	int ret;

	/* Clear all stats counters */
	pf->offset_loaded = FALSE;
	memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));

	ret = i40e_pf_get_switch_config(pf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
		return ret;
	}

	/* Switch domain allocation failure is non-fatal: only a warning. */
	ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
	if (ret)
		PMD_INIT_LOG(WARNING,
			"failed to allocate switch domain for device %d", ret);

	if (pf->flags & I40E_FLAG_FDIR) {
		/* make queue allocated first, let FDIR use queue pair 0*/
		ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
		if (ret != I40E_FDIR_QUEUE_ID) {
			PMD_DRV_LOG(ERR,
				"queue allocation fails for FDIR: ret =%d",
				ret);
			/* FDIR is disabled rather than failing PF setup */
			pf->flags &= ~I40E_FLAG_FDIR;
		}
	}
	/*  main VSI setup */
	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Setup of main vsi failed");
		return I40E_ERR_NOT_READY;
	}
	pf->main_vsi = vsi;

	/* Configure filter control */
	memset(&settings, 0, sizeof(settings));
	if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
	else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	else {
		/* Only 128- and 512-entry RSS LUTs are supported */
		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
			hw->func_caps.rss_table_size);
		return I40E_ERR_PARAM;
	}
	PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
		hw->func_caps.rss_table_size);
	pf->hash_lut_size = hw->func_caps.rss_table_size;

	/* Enable ethtype and macvlan filters */
	settings.enable_ethtype = TRUE;
	settings.enable_macvlan = TRUE;
	ret = i40e_set_filter_control(hw, &settings);
	if (ret)
		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
								ret);

	/* Update flow control according to the auto negotiation */
	i40e_update_flow_control(hw);

	return I40E_SUCCESS;
}
6293
6294 int
6295 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6296 {
6297         uint32_t reg;
6298         uint16_t j;
6299
6300         /**
6301          * Set or clear TX Queue Disable flags,
6302          * which is required by hardware.
6303          */
6304         i40e_pre_tx_queue_cfg(hw, q_idx, on);
6305         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6306
6307         /* Wait until the request is finished */
6308         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6309                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6310                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6311                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6312                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6313                                                         & 0x1))) {
6314                         break;
6315                 }
6316         }
6317         if (on) {
6318                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6319                         return I40E_SUCCESS; /* already on, skip next steps */
6320
6321                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6322                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6323         } else {
6324                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6325                         return I40E_SUCCESS; /* already off, skip next steps */
6326                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6327         }
6328         /* Write the register */
6329         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6330         /* Check the result */
6331         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6332                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6333                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6334                 if (on) {
6335                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6336                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
6337                                 break;
6338                 } else {
6339                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6340                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6341                                 break;
6342                 }
6343         }
6344         /* Check if it is timeout */
6345         if (j >= I40E_CHK_Q_ENA_COUNT) {
6346                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6347                             (on ? "enable" : "disable"), q_idx);
6348                 return I40E_ERR_TIMEOUT;
6349         }
6350
6351         return I40E_SUCCESS;
6352 }
6353
6354 int
6355 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6356 {
6357         uint32_t reg;
6358         uint16_t j;
6359
6360         /* Wait until the request is finished */
6361         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6362                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6363                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6364                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6365                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
6366                         break;
6367         }
6368
6369         if (on) {
6370                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6371                         return I40E_SUCCESS; /* Already on, skip next steps */
6372                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6373         } else {
6374                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6375                         return I40E_SUCCESS; /* Already off, skip next steps */
6376                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6377         }
6378
6379         /* Write the register */
6380         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6381         /* Check the result */
6382         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6383                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6384                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6385                 if (on) {
6386                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6387                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
6388                                 break;
6389                 } else {
6390                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6391                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6392                                 break;
6393                 }
6394         }
6395
6396         /* Check if it is timeout */
6397         if (j >= I40E_CHK_Q_ENA_COUNT) {
6398                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6399                             (on ? "enable" : "disable"), q_idx);
6400                 return I40E_ERR_TIMEOUT;
6401         }
6402
6403         return I40E_SUCCESS;
6404 }
6405
6406 /* Initialize VSI for TX */
6407 static int
6408 i40e_dev_tx_init(struct i40e_pf *pf)
6409 {
6410         struct rte_eth_dev_data *data = pf->dev_data;
6411         uint16_t i;
6412         uint32_t ret = I40E_SUCCESS;
6413         struct i40e_tx_queue *txq;
6414
6415         for (i = 0; i < data->nb_tx_queues; i++) {
6416                 txq = data->tx_queues[i];
6417                 if (!txq || !txq->q_set)
6418                         continue;
6419                 ret = i40e_tx_queue_init(txq);
6420                 if (ret != I40E_SUCCESS)
6421                         break;
6422         }
6423         if (ret == I40E_SUCCESS)
6424                 i40e_set_tx_function(&rte_eth_devices[pf->dev_data->port_id]);
6425
6426         return ret;
6427 }
6428
6429 /* Initialize VSI for RX */
6430 static int
6431 i40e_dev_rx_init(struct i40e_pf *pf)
6432 {
6433         struct rte_eth_dev_data *data = pf->dev_data;
6434         int ret = I40E_SUCCESS;
6435         uint16_t i;
6436         struct i40e_rx_queue *rxq;
6437
6438         i40e_pf_config_rss(pf);
6439         for (i = 0; i < data->nb_rx_queues; i++) {
6440                 rxq = data->rx_queues[i];
6441                 if (!rxq || !rxq->q_set)
6442                         continue;
6443
6444                 ret = i40e_rx_queue_init(rxq);
6445                 if (ret != I40E_SUCCESS) {
6446                         PMD_DRV_LOG(ERR,
6447                                 "Failed to do RX queue initialization");
6448                         break;
6449                 }
6450         }
6451         if (ret == I40E_SUCCESS)
6452                 i40e_set_rx_function(&rte_eth_devices[pf->dev_data->port_id]);
6453
6454         return ret;
6455 }
6456
6457 static int
6458 i40e_dev_rxtx_init(struct i40e_pf *pf)
6459 {
6460         int err;
6461
6462         err = i40e_dev_tx_init(pf);
6463         if (err) {
6464                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6465                 return err;
6466         }
6467         err = i40e_dev_rx_init(pf);
6468         if (err) {
6469                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6470                 return err;
6471         }
6472
6473         return err;
6474 }
6475
6476 static int
6477 i40e_vmdq_setup(struct rte_eth_dev *dev)
6478 {
6479         struct rte_eth_conf *conf = &dev->data->dev_conf;
6480         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6481         int i, err, conf_vsis, j, loop;
6482         struct i40e_vsi *vsi;
6483         struct i40e_vmdq_info *vmdq_info;
6484         struct rte_eth_vmdq_rx_conf *vmdq_conf;
6485         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6486
6487         /*
6488          * Disable interrupt to avoid message from VF. Furthermore, it will
6489          * avoid race condition in VSI creation/destroy.
6490          */
6491         i40e_pf_disable_irq0(hw);
6492
6493         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6494                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6495                 return -ENOTSUP;
6496         }
6497
6498         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6499         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6500                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
6501                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6502                         pf->max_nb_vmdq_vsi);
6503                 return -ENOTSUP;
6504         }
6505
6506         if (pf->vmdq != NULL) {
6507                 PMD_INIT_LOG(INFO, "VMDQ already configured");
6508                 return 0;
6509         }
6510
6511         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6512                                 sizeof(*vmdq_info) * conf_vsis, 0);
6513
6514         if (pf->vmdq == NULL) {
6515                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6516                 return -ENOMEM;
6517         }
6518
6519         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6520
6521         /* Create VMDQ VSI */
6522         for (i = 0; i < conf_vsis; i++) {
6523                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6524                                 vmdq_conf->enable_loop_back);
6525                 if (vsi == NULL) {
6526                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6527                         err = -1;
6528                         goto err_vsi_setup;
6529                 }
6530                 vmdq_info = &pf->vmdq[i];
6531                 vmdq_info->pf = pf;
6532                 vmdq_info->vsi = vsi;
6533         }
6534         pf->nb_cfg_vmdq_vsi = conf_vsis;
6535
6536         /* Configure Vlan */
6537         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6538         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6539                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6540                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6541                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6542                                         vmdq_conf->pool_map[i].vlan_id, j);
6543
6544                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6545                                                 vmdq_conf->pool_map[i].vlan_id);
6546                                 if (err) {
6547                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6548                                         err = -1;
6549                                         goto err_vsi_setup;
6550                                 }
6551                         }
6552                 }
6553         }
6554
6555         i40e_pf_enable_irq0(hw);
6556
6557         return 0;
6558
6559 err_vsi_setup:
6560         for (i = 0; i < conf_vsis; i++)
6561                 if (pf->vmdq[i].vsi == NULL)
6562                         break;
6563                 else
6564                         i40e_vsi_release(pf->vmdq[i].vsi);
6565
6566         rte_free(pf->vmdq);
6567         pf->vmdq = NULL;
6568         i40e_pf_enable_irq0(hw);
6569         return err;
6570 }
6571
6572 static void
6573 i40e_stat_update_32(struct i40e_hw *hw,
6574                    uint32_t reg,
6575                    bool offset_loaded,
6576                    uint64_t *offset,
6577                    uint64_t *stat)
6578 {
6579         uint64_t new_data;
6580
6581         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6582         if (!offset_loaded)
6583                 *offset = new_data;
6584
6585         if (new_data >= *offset)
6586                 *stat = (uint64_t)(new_data - *offset);
6587         else
6588                 *stat = (uint64_t)((new_data +
6589                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6590 }
6591
6592 static void
6593 i40e_stat_update_48(struct i40e_hw *hw,
6594                    uint32_t hireg,
6595                    uint32_t loreg,
6596                    bool offset_loaded,
6597                    uint64_t *offset,
6598                    uint64_t *stat)
6599 {
6600         uint64_t new_data;
6601
6602         if (hw->device_id == I40E_DEV_ID_QEMU) {
6603                 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6604                 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6605                                 I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6606         } else {
6607                 new_data = I40E_READ_REG64(hw, loreg);
6608         }
6609
6610         if (!offset_loaded)
6611                 *offset = new_data;
6612
6613         if (new_data >= *offset)
6614                 *stat = new_data - *offset;
6615         else
6616                 *stat = (uint64_t)((new_data +
6617                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6618
6619         *stat &= I40E_48_BIT_MASK;
6620 }
6621
6622 /* Disable IRQ0 */
6623 void
6624 i40e_pf_disable_irq0(struct i40e_hw *hw)
6625 {
6626         /* Disable all interrupt types */
6627         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6628                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6629         I40E_WRITE_FLUSH(hw);
6630 }
6631
6632 /* Enable IRQ0 */
6633 void
6634 i40e_pf_enable_irq0(struct i40e_hw *hw)
6635 {
6636         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6637                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6638                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6639                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6640         I40E_WRITE_FLUSH(hw);
6641 }
6642
6643 static void
6644 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6645 {
6646         /* read pending request and disable first */
6647         i40e_pf_disable_irq0(hw);
6648         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6649         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6650                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6651
6652         if (no_queue)
6653                 /* Link no queues with irq0 */
6654                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6655                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6656 }
6657
6658 static void
6659 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6660 {
6661         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6662         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6663         int i;
6664         uint16_t abs_vf_id;
6665         uint32_t index, offset, val;
6666
6667         if (!pf->vfs)
6668                 return;
6669         /**
6670          * Try to find which VF trigger a reset, use absolute VF id to access
6671          * since the reg is global register.
6672          */
6673         for (i = 0; i < pf->vf_num; i++) {
6674                 abs_vf_id = hw->func_caps.vf_base_id + i;
6675                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6676                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6677                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6678                 /* VFR event occurred */
6679                 if (val & (0x1 << offset)) {
6680                         int ret;
6681
6682                         /* Clear the event first */
6683                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6684                                                         (0x1 << offset));
6685                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6686                         /**
6687                          * Only notify a VF reset event occurred,
6688                          * don't trigger another SW reset
6689                          */
6690                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6691                         if (ret != I40E_SUCCESS)
6692                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6693                 }
6694         }
6695 }
6696
6697 static void
6698 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6699 {
6700         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6701         int i;
6702
6703         for (i = 0; i < pf->vf_num; i++)
6704                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6705 }
6706
6707 static void
6708 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6709 {
6710         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6711         struct i40e_arq_event_info info;
6712         uint16_t pending, opcode;
6713         int ret;
6714
6715         info.buf_len = I40E_AQ_BUF_SZ;
6716         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6717         if (!info.msg_buf) {
6718                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6719                 return;
6720         }
6721
6722         pending = 1;
6723         while (pending) {
6724                 ret = i40e_clean_arq_element(hw, &info, &pending);
6725
6726                 if (ret != I40E_SUCCESS) {
6727                         PMD_DRV_LOG(INFO,
6728                                 "Failed to read msg from AdminQ, aq_err: %u",
6729                                 hw->aq.asq_last_status);
6730                         break;
6731                 }
6732                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6733
6734                 switch (opcode) {
6735                 case i40e_aqc_opc_send_msg_to_pf:
6736                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout*/
6737                         i40e_pf_host_handle_vf_msg(dev,
6738                                         rte_le_to_cpu_16(info.desc.retval),
6739                                         rte_le_to_cpu_32(info.desc.cookie_high),
6740                                         rte_le_to_cpu_32(info.desc.cookie_low),
6741                                         info.msg_buf,
6742                                         info.msg_len);
6743                         break;
6744                 case i40e_aqc_opc_get_link_status:
6745                         ret = i40e_dev_link_update(dev, 0);
6746                         if (!ret)
6747                                 rte_eth_dev_callback_process(dev,
6748                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6749                         break;
6750                 default:
6751                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6752                                     opcode);
6753                         break;
6754                 }
6755         }
6756         rte_free(info.msg_buf);
6757 }
6758
/* Decode a Malicious Driver Detection (MDD) event: report which TX/RX
 * queue and which PF/VF triggered it, clear the latched registers, and
 * bump the per-VF event counters.
 */
static void
i40e_handle_mdd_event(struct rte_eth_dev *dev)
{
/* Write-1-to-clear values for the 32-bit global and 16-bit per-function
 * MDET latch registers.
 */
#define I40E_MDD_CLEAR32 0xFFFFFFFF
#define I40E_MDD_CLEAR16 0xFFFF
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	bool mdd_detected = false;
	struct i40e_pf_vf *vf;
	uint32_t reg;
	int i;

	/* find what triggered the MDD event */
	reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		/* queue field holds an absolute queue id; convert it to an
		 * index relative to this function's base queue
		 */
		uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
					hw->func_caps.base_queue;
		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
			"queue %d PF number 0x%02x VF number 0x%02x device %s\n",
				event, queue, pf_num, vf_num, dev->data->name);
		/* write 1s to clear the latched global TX event */
		I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
		mdd_detected = true;
	}
	reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		/* same absolute-to-relative queue conversion as the TX side */
		uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
					hw->func_caps.base_queue;

		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
				"queue %d of function 0x%02x device %s\n",
					event, queue, func, dev->data->name);
		I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
		mdd_detected = true;
	}

	/* a global event was latched: check whether this PF was the cause
	 * and clear its per-PF latch registers
	 */
	if (mdd_detected) {
		reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
			PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n");
		}
		reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
					I40E_MDD_CLEAR16);
			PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n");
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->vf_num && mdd_detected; i++) {
		vf = &pf->vfs[i];
		reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
					I40E_MDD_CLEAR16);
			vf->num_mdd_events++;
			PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %-"
					PRIu64 "times\n",
					i, vf->num_mdd_events);
		}

		reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
					I40E_MDD_CLEAR16);
			vf->num_mdd_events++;
			PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %-"
					PRIu64 "times\n",
					i, vf->num_mdd_events);
		}
	}
}
6844
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
6857 static void
6858 i40e_dev_interrupt_handler(void *param)
6859 {
6860         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6861         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6862         uint32_t icr0;
6863
6864         /* Disable interrupt */
6865         i40e_pf_disable_irq0(hw);
6866
6867         /* read out interrupt causes */
6868         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6869
6870         /* No interrupt event indicated */
6871         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6872                 PMD_DRV_LOG(INFO, "No interrupt event");
6873                 goto done;
6874         }
6875         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6876                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6877         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6878                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6879                 i40e_handle_mdd_event(dev);
6880         }
6881         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6882                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6883         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6884                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6885         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6886                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6887         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6888                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6889         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6890                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6891
6892         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6893                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6894                 i40e_dev_handle_vfr_event(dev);
6895         }
6896         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6897                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6898                 i40e_dev_handle_aq_msg(dev);
6899         }
6900
6901 done:
6902         /* Enable interrupt */
6903         i40e_pf_enable_irq0(hw);
6904 }
6905
6906 static void
6907 i40e_dev_alarm_handler(void *param)
6908 {
6909         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6910         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6911         uint32_t icr0;
6912
6913         /* Disable interrupt */
6914         i40e_pf_disable_irq0(hw);
6915
6916         /* read out interrupt causes */
6917         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6918
6919         /* No interrupt event indicated */
6920         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
6921                 goto done;
6922         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6923                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6924         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6925                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6926                 i40e_handle_mdd_event(dev);
6927         }
6928         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6929                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6930         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6931                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6932         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6933                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6934         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6935                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6936         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6937                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6938
6939         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6940                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6941                 i40e_dev_handle_vfr_event(dev);
6942         }
6943         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6944                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6945                 i40e_dev_handle_aq_msg(dev);
6946         }
6947
6948 done:
6949         /* Enable interrupt */
6950         i40e_pf_enable_irq0(hw);
6951         rte_eal_alarm_set(I40E_ALARM_INTERVAL,
6952                           i40e_dev_alarm_handler, dev);
6953 }
6954
6955 int
6956 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6957                          struct i40e_macvlan_filter *filter,
6958                          int total)
6959 {
6960         int ele_num, ele_buff_size;
6961         int num, actual_num, i;
6962         uint16_t flags;
6963         int ret = I40E_SUCCESS;
6964         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6965         struct i40e_aqc_add_macvlan_element_data *req_list;
6966
6967         if (filter == NULL  || total == 0)
6968                 return I40E_ERR_PARAM;
6969         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6970         ele_buff_size = hw->aq.asq_buf_size;
6971
6972         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6973         if (req_list == NULL) {
6974                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6975                 return I40E_ERR_NO_MEMORY;
6976         }
6977
6978         num = 0;
6979         do {
6980                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6981                 memset(req_list, 0, ele_buff_size);
6982
6983                 for (i = 0; i < actual_num; i++) {
6984                         rte_memcpy(req_list[i].mac_addr,
6985                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6986                         req_list[i].vlan_tag =
6987                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6988
6989                         switch (filter[num + i].filter_type) {
6990                         case I40E_MAC_PERFECT_MATCH:
6991                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6992                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6993                                 break;
6994                         case I40E_MACVLAN_PERFECT_MATCH:
6995                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6996                                 break;
6997                         case I40E_MAC_HASH_MATCH:
6998                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6999                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
7000                                 break;
7001                         case I40E_MACVLAN_HASH_MATCH:
7002                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
7003                                 break;
7004                         default:
7005                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
7006                                 ret = I40E_ERR_PARAM;
7007                                 goto DONE;
7008                         }
7009
7010                         req_list[i].queue_number = 0;
7011
7012                         req_list[i].flags = rte_cpu_to_le_16(flags);
7013                 }
7014
7015                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
7016                                                 actual_num, NULL);
7017                 if (ret != I40E_SUCCESS) {
7018                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
7019                         goto DONE;
7020                 }
7021                 num += actual_num;
7022         } while (num < total);
7023
7024 DONE:
7025         rte_free(req_list);
7026         return ret;
7027 }
7028
7029 int
7030 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
7031                             struct i40e_macvlan_filter *filter,
7032                             int total)
7033 {
7034         int ele_num, ele_buff_size;
7035         int num, actual_num, i;
7036         uint16_t flags;
7037         int ret = I40E_SUCCESS;
7038         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7039         struct i40e_aqc_remove_macvlan_element_data *req_list;
7040
7041         if (filter == NULL  || total == 0)
7042                 return I40E_ERR_PARAM;
7043
7044         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7045         ele_buff_size = hw->aq.asq_buf_size;
7046
7047         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
7048         if (req_list == NULL) {
7049                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
7050                 return I40E_ERR_NO_MEMORY;
7051         }
7052
7053         num = 0;
7054         do {
7055                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7056                 memset(req_list, 0, ele_buff_size);
7057
7058                 for (i = 0; i < actual_num; i++) {
7059                         rte_memcpy(req_list[i].mac_addr,
7060                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
7061                         req_list[i].vlan_tag =
7062                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
7063
7064                         switch (filter[num + i].filter_type) {
7065                         case I40E_MAC_PERFECT_MATCH:
7066                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
7067                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7068                                 break;
7069                         case I40E_MACVLAN_PERFECT_MATCH:
7070                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7071                                 break;
7072                         case I40E_MAC_HASH_MATCH:
7073                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
7074                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7075                                 break;
7076                         case I40E_MACVLAN_HASH_MATCH:
7077                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
7078                                 break;
7079                         default:
7080                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
7081                                 ret = I40E_ERR_PARAM;
7082                                 goto DONE;
7083                         }
7084                         req_list[i].flags = rte_cpu_to_le_16(flags);
7085                 }
7086
7087                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
7088                                                 actual_num, NULL);
7089                 if (ret != I40E_SUCCESS) {
7090                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7091                         goto DONE;
7092                 }
7093                 num += actual_num;
7094         } while (num < total);
7095
7096 DONE:
7097         rte_free(req_list);
7098         return ret;
7099 }
7100
7101 /* Find out specific MAC filter */
7102 static struct i40e_mac_filter *
7103 i40e_find_mac_filter(struct i40e_vsi *vsi,
7104                          struct rte_ether_addr *macaddr)
7105 {
7106         struct i40e_mac_filter *f;
7107
7108         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7109                 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7110                         return f;
7111         }
7112
7113         return NULL;
7114 }
7115
7116 static bool
7117 i40e_find_vlan_filter(struct i40e_vsi *vsi,
7118                          uint16_t vlan_id)
7119 {
7120         uint32_t vid_idx, vid_bit;
7121
7122         if (vlan_id > ETH_VLAN_ID_MAX)
7123                 return 0;
7124
7125         vid_idx = I40E_VFTA_IDX(vlan_id);
7126         vid_bit = I40E_VFTA_BIT(vlan_id);
7127
7128         if (vsi->vfta[vid_idx] & vid_bit)
7129                 return 1;
7130         else
7131                 return 0;
7132 }
7133
7134 static void
7135 i40e_store_vlan_filter(struct i40e_vsi *vsi,
7136                        uint16_t vlan_id, bool on)
7137 {
7138         uint32_t vid_idx, vid_bit;
7139
7140         vid_idx = I40E_VFTA_IDX(vlan_id);
7141         vid_bit = I40E_VFTA_BIT(vlan_id);
7142
7143         if (on)
7144                 vsi->vfta[vid_idx] |= vid_bit;
7145         else
7146                 vsi->vfta[vid_idx] &= ~vid_bit;
7147 }
7148
7149 void
7150 i40e_set_vlan_filter(struct i40e_vsi *vsi,
7151                      uint16_t vlan_id, bool on)
7152 {
7153         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7154         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
7155         int ret;
7156
7157         if (vlan_id > ETH_VLAN_ID_MAX)
7158                 return;
7159
7160         i40e_store_vlan_filter(vsi, vlan_id, on);
7161
7162         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
7163                 return;
7164
7165         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
7166
7167         if (on) {
7168                 ret = i40e_aq_add_vlan(hw, vsi->seid,
7169                                        &vlan_data, 1, NULL);
7170                 if (ret != I40E_SUCCESS)
7171                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
7172         } else {
7173                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
7174                                           &vlan_data, 1, NULL);
7175                 if (ret != I40E_SUCCESS)
7176                         PMD_DRV_LOG(ERR,
7177                                     "Failed to remove vlan filter");
7178         }
7179 }
7180
7181 /**
7182  * Find all vlan options for specific mac addr,
7183  * return with actual vlan found.
7184  */
7185 int
7186 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7187                            struct i40e_macvlan_filter *mv_f,
7188                            int num, struct rte_ether_addr *addr)
7189 {
7190         int i;
7191         uint32_t j, k;
7192
7193         /**
7194          * Not to use i40e_find_vlan_filter to decrease the loop time,
7195          * although the code looks complex.
7196           */
7197         if (num < vsi->vlan_num)
7198                 return I40E_ERR_PARAM;
7199
7200         i = 0;
7201         for (j = 0; j < I40E_VFTA_SIZE; j++) {
7202                 if (vsi->vfta[j]) {
7203                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7204                                 if (vsi->vfta[j] & (1 << k)) {
7205                                         if (i > num - 1) {
7206                                                 PMD_DRV_LOG(ERR,
7207                                                         "vlan number doesn't match");
7208                                                 return I40E_ERR_PARAM;
7209                                         }
7210                                         rte_memcpy(&mv_f[i].macaddr,
7211                                                         addr, ETH_ADDR_LEN);
7212                                         mv_f[i].vlan_id =
7213                                                 j * I40E_UINT32_BIT_SIZE + k;
7214                                         i++;
7215                                 }
7216                         }
7217                 }
7218         }
7219         return I40E_SUCCESS;
7220 }
7221
7222 static inline int
7223 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7224                            struct i40e_macvlan_filter *mv_f,
7225                            int num,
7226                            uint16_t vlan)
7227 {
7228         int i = 0;
7229         struct i40e_mac_filter *f;
7230
7231         if (num < vsi->mac_num)
7232                 return I40E_ERR_PARAM;
7233
7234         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7235                 if (i > num - 1) {
7236                         PMD_DRV_LOG(ERR, "buffer number not match");
7237                         return I40E_ERR_PARAM;
7238                 }
7239                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7240                                 ETH_ADDR_LEN);
7241                 mv_f[i].vlan_id = vlan;
7242                 mv_f[i].filter_type = f->mac_info.filter_type;
7243                 i++;
7244         }
7245
7246         return I40E_SUCCESS;
7247 }
7248
7249 static int
7250 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7251 {
7252         int i, j, num;
7253         struct i40e_mac_filter *f;
7254         struct i40e_macvlan_filter *mv_f;
7255         int ret = I40E_SUCCESS;
7256
7257         if (vsi == NULL || vsi->mac_num == 0)
7258                 return I40E_ERR_PARAM;
7259
7260         /* Case that no vlan is set */
7261         if (vsi->vlan_num == 0)
7262                 num = vsi->mac_num;
7263         else
7264                 num = vsi->mac_num * vsi->vlan_num;
7265
7266         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7267         if (mv_f == NULL) {
7268                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7269                 return I40E_ERR_NO_MEMORY;
7270         }
7271
7272         i = 0;
7273         if (vsi->vlan_num == 0) {
7274                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7275                         rte_memcpy(&mv_f[i].macaddr,
7276                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
7277                         mv_f[i].filter_type = f->mac_info.filter_type;
7278                         mv_f[i].vlan_id = 0;
7279                         i++;
7280                 }
7281         } else {
7282                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7283                         ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
7284                                         vsi->vlan_num, &f->mac_info.mac_addr);
7285                         if (ret != I40E_SUCCESS)
7286                                 goto DONE;
7287                         for (j = i; j < i + vsi->vlan_num; j++)
7288                                 mv_f[j].filter_type = f->mac_info.filter_type;
7289                         i += vsi->vlan_num;
7290                 }
7291         }
7292
7293         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7294 DONE:
7295         rte_free(mv_f);
7296
7297         return ret;
7298 }
7299
7300 int
7301 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7302 {
7303         struct i40e_macvlan_filter *mv_f;
7304         int mac_num;
7305         int ret = I40E_SUCCESS;
7306
7307         if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7308                 return I40E_ERR_PARAM;
7309
7310         /* If it's already set, just return */
7311         if (i40e_find_vlan_filter(vsi,vlan))
7312                 return I40E_SUCCESS;
7313
7314         mac_num = vsi->mac_num;
7315
7316         if (mac_num == 0) {
7317                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7318                 return I40E_ERR_PARAM;
7319         }
7320
7321         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7322
7323         if (mv_f == NULL) {
7324                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7325                 return I40E_ERR_NO_MEMORY;
7326         }
7327
7328         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7329
7330         if (ret != I40E_SUCCESS)
7331                 goto DONE;
7332
7333         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7334
7335         if (ret != I40E_SUCCESS)
7336                 goto DONE;
7337
7338         i40e_set_vlan_filter(vsi, vlan, 1);
7339
7340         vsi->vlan_num++;
7341         ret = I40E_SUCCESS;
7342 DONE:
7343         rte_free(mv_f);
7344         return ret;
7345 }
7346
/**
 * Disable a VLAN filter on a VSI.
 *
 * Removes the MAC/VLAN filter pair for @vlan from every MAC address
 * configured on the VSI.  When this was the last VLAN, the MAC filters
 * are re-added with VLAN 0 so untagged traffic keeps matching.
 *
 * Returns I40E_SUCCESS, I40E_ERR_PARAM on invalid input or when the
 * filter does not exist, or I40E_ERR_NO_MEMORY.
 */
int
i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
{
	struct i40e_macvlan_filter *mv_f;
	int mac_num;
	int ret = I40E_SUCCESS;

	/**
	 * Vlan 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
		return I40E_ERR_PARAM;

	/* If can't find it, just return */
	if (!i40e_find_vlan_filter(vsi, vlan))
		return I40E_ERR_PARAM;

	mac_num = vsi->mac_num;

	if (mac_num == 0) {
		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
		return I40E_ERR_PARAM;
	}

	/* One scratch entry per configured MAC address. */
	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);

	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	/* Collect every MAC/VLAN pair for this VLAN before removal. */
	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);

	if (ret != I40E_SUCCESS)
		goto DONE;

	ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);

	if (ret != I40E_SUCCESS)
		goto DONE;

	/* This is last vlan to remove, replace all mac filter with vlan 0 */
	if (vsi->vlan_num == 1) {
		/* Reuse mv_f: rebuild the entries against VLAN 0. */
		ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
		if (ret != I40E_SUCCESS)
			goto DONE;

		ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
		if (ret != I40E_SUCCESS)
			goto DONE;
	}

	/* Hardware updated; now drop the VLAN from software state. */
	i40e_set_vlan_filter(vsi, vlan, 0);

	vsi->vlan_num--;
	ret = I40E_SUCCESS;
DONE:
	rte_free(mv_f);
	return ret;
}
7408
/**
 * Add a MAC filter to a VSI.
 *
 * For MACVLAN match types the MAC is paired with every VLAN currently
 * configured on the VSI (VLAN 0 is implicitly created on the first MAC);
 * for plain MAC match types a single filter entry is used.  On success
 * the MAC is appended to the VSI's software mac_list.
 *
 * Returns I40E_SUCCESS (also when the MAC is already configured),
 * or a negative i40e status on failure.
 */
int
i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num = 0;
	int ret = I40E_SUCCESS;

	/* If it's add and we've config it, return */
	f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
	if (f != NULL)
		return I40E_SUCCESS;
	if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
		mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {

		/**
		 * If vlan_num is 0, that's the first time to add mac,
		 * set mask for vlan_id 0.
		 */
		if (vsi->vlan_num == 0) {
			i40e_set_vlan_filter(vsi, 0, 1);
			vsi->vlan_num = 1;
		}
		vlan_num = vsi->vlan_num;
	} else if (mac_filter->filter_type == I40E_MAC_PERFECT_MATCH ||
			mac_filter->filter_type == I40E_MAC_HASH_MATCH)
		vlan_num = 1;

	/* NOTE(review): an unrecognized filter_type leaves vlan_num == 0,
	 * i.e. a zero-size allocation and no filters programmed — confirm
	 * callers only pass the four types handled above.
	 */
	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	/* Seed every entry with the MAC; VLANs are filled in below. */
	for (i = 0; i < vlan_num; i++) {
		mv_f[i].filter_type = mac_filter->filter_type;
		rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
				ETH_ADDR_LEN);
	}

	if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
		mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
					&mac_filter->mac_addr);
		if (ret != I40E_SUCCESS)
			goto DONE;
	}

	ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
	if (ret != I40E_SUCCESS)
		goto DONE;

	/* Add the mac addr into mac list */
	f = rte_zmalloc("macv_filter", sizeof(*f), 0);
	if (f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = I40E_ERR_NO_MEMORY;
		goto DONE;
	}
	rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
			ETH_ADDR_LEN);
	f->mac_info.filter_type = mac_filter->filter_type;
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = I40E_SUCCESS;
DONE:
	rte_free(mv_f);

	return ret;
}
7480
/**
 * Remove a MAC filter from a VSI.
 *
 * Removes the MAC (paired with every VSI VLAN for MACVLAN match types,
 * or as a single entry for plain MAC match types) from hardware, then
 * unlinks and frees the entry from the VSI's software mac_list.
 *
 * Returns I40E_SUCCESS, I40E_ERR_PARAM when the MAC is not configured
 * or state is inconsistent, or I40E_ERR_NO_MEMORY.
 */
int
i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num;
	enum i40e_mac_filter_type filter_type;
	int ret = I40E_SUCCESS;

	/* Can't find it, return an error */
	f = i40e_find_mac_filter(vsi, addr);
	if (f == NULL)
		return I40E_ERR_PARAM;

	vlan_num = vsi->vlan_num;
	filter_type = f->mac_info.filter_type;
	if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
		filter_type == I40E_MACVLAN_HASH_MATCH) {
		/* MACVLAN entries require at least the implicit VLAN 0. */
		if (vlan_num == 0) {
			PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
			return I40E_ERR_PARAM;
		}
	} else if (filter_type == I40E_MAC_PERFECT_MATCH ||
			filter_type == I40E_MAC_HASH_MATCH)
		vlan_num = 1;

	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	/* Seed every entry with the MAC being removed. */
	for (i = 0; i < vlan_num; i++) {
		mv_f[i].filter_type = filter_type;
		rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
				ETH_ADDR_LEN);
	}
	if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
			filter_type == I40E_MACVLAN_HASH_MATCH) {
		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
		if (ret != I40E_SUCCESS)
			goto DONE;
	}

	ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
	if (ret != I40E_SUCCESS)
		goto DONE;

	/* Remove the mac addr into mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = I40E_SUCCESS;
DONE:
	rte_free(mv_f);
	return ret;
}
7539
7540 /* Configure hash enable flags for RSS */
7541 uint64_t
7542 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7543 {
7544         uint64_t hena = 0;
7545         int i;
7546
7547         if (!flags)
7548                 return hena;
7549
7550         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7551                 if (flags & (1ULL << i))
7552                         hena |= adapter->pctypes_tbl[i];
7553         }
7554
7555         return hena;
7556 }
7557
7558 /* Parse the hash enable flags */
7559 uint64_t
7560 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7561 {
7562         uint64_t rss_hf = 0;
7563
7564         if (!flags)
7565                 return rss_hf;
7566         int i;
7567
7568         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7569                 if (flags & adapter->pctypes_tbl[i])
7570                         rss_hf |= (1ULL << i);
7571         }
7572         return rss_hf;
7573 }
7574
7575 /* Disable RSS */
7576 void
7577 i40e_pf_disable_rss(struct i40e_pf *pf)
7578 {
7579         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7580
7581         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7582         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7583         I40E_WRITE_FLUSH(hw);
7584 }
7585
/**
 * Program the RSS hash key for a VSI.
 *
 * @key/@key_len: key bytes; length must be exactly
 *   (HKEY_MAX_INDEX + 1) * 4 bytes for the VSI type (VF vs PF).
 *
 * Uses the admin queue when the firmware advertises
 * I40E_FLAG_RSS_AQ_CAPABLE, otherwise writes the HKEY registers
 * directly (VF registers for an SR-IOV VSI, PF registers otherwise).
 *
 * Returns 0 on success (or when no key is given), -EINVAL on bad
 * length, -EIO on AQ failure.
 */
int
i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
{
	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	/* Highest key-register index differs between VF and PF VSIs. */
	uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
			   I40E_VFQF_HKEY_MAX_INDEX :
			   I40E_PFQF_HKEY_MAX_INDEX;

	if (!key || key_len == 0) {
		/* No key supplied is not an error; keep the current key. */
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (key_len != (key_idx + 1) *
		sizeof(uint32_t)) {
		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
		return -EINVAL;
	}

	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		struct i40e_aqc_get_set_rss_key_data *key_dw =
				(struct i40e_aqc_get_set_rss_key_data *)key;
		enum i40e_status_code status =
				i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);

		if (status) {
			PMD_DRV_LOG(ERR,
				    "Failed to configure RSS key via AQ, error status: %d",
				    status);
			return -EIO;
		}
	} else {
		/* Direct register path: write the key one dword at a time. */
		uint32_t *hash_key = (uint32_t *)key;
		uint16_t i;

		if (vsi->type == I40E_VSI_SRIOV) {
			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
				I40E_WRITE_REG(
					hw,
					I40E_VFQF_HKEY1(i, vsi->user_param),
					hash_key[i]);

		} else {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
					       hash_key[i]);
		}
		I40E_WRITE_FLUSH(hw);
	}

	return 0;
}
7637
/**
 * Read back the RSS hash key of a VSI.
 *
 * @key/@key_len: output buffer and, on the register path, the number
 *   of key bytes written.  Both NULL is treated as "nothing requested".
 *
 * Mirrors i40e_set_rss_key(): uses the admin queue when available,
 * otherwise reads the VF or PF HKEY registers directly.
 *
 * Returns 0 on success, or the AQ error code on failure.
 */
static int
i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
{
	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint32_t reg;
	int ret;

	if (!key || !key_len)
		return 0;

	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		/* NOTE(review): on this path *key_len is left untouched —
		 * presumably callers pre-set it; confirm against callers. */
		ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
			(struct i40e_aqc_get_set_rss_key_data *)key);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
			return ret;
		}
	} else {
		uint32_t *key_dw = (uint32_t *)key;
		uint16_t i;

		if (vsi->type == I40E_VSI_SRIOV) {
			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
				reg = I40E_VFQF_HKEY1(i, vsi->user_param);
				key_dw[i] = i40e_read_rx_ctl(hw, reg);
			}
			*key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
				   sizeof(uint32_t);
		} else {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
				reg = I40E_PFQF_HKEY(i);
				key_dw[i] = i40e_read_rx_ctl(hw, reg);
			}
			*key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
				   sizeof(uint32_t);
		}
	}
	return 0;
}
7678
7679 static int
7680 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7681 {
7682         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7683         uint64_t hena;
7684         int ret;
7685
7686         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7687                                rss_conf->rss_key_len);
7688         if (ret)
7689                 return ret;
7690
7691         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7692         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7693         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7694         I40E_WRITE_FLUSH(hw);
7695
7696         return 0;
7697 }
7698
7699 static int
7700 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7701                          struct rte_eth_rss_conf *rss_conf)
7702 {
7703         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7704         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7705         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7706         uint64_t hena;
7707
7708         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7709         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7710
7711         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7712                 if (rss_hf != 0) /* Enable RSS */
7713                         return -EINVAL;
7714                 return 0; /* Nothing to do */
7715         }
7716         /* RSS enabled */
7717         if (rss_hf == 0) /* Disable RSS */
7718                 return -EINVAL;
7719
7720         return i40e_hw_rss_hash_set(pf, rss_conf);
7721 }
7722
7723 static int
7724 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7725                            struct rte_eth_rss_conf *rss_conf)
7726 {
7727         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7728         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7729         uint64_t hena;
7730         int ret;
7731
7732         if (!rss_conf)
7733                 return -EINVAL;
7734
7735         ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7736                          &rss_conf->rss_key_len);
7737         if (ret)
7738                 return ret;
7739
7740         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7741         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7742         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7743
7744         return 0;
7745 }
7746
7747 static int
7748 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7749 {
7750         switch (filter_type) {
7751         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7752                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7753                 break;
7754         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7755                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7756                 break;
7757         case RTE_TUNNEL_FILTER_IMAC_TENID:
7758                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7759                 break;
7760         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7761                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7762                 break;
7763         case ETH_TUNNEL_FILTER_IMAC:
7764                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7765                 break;
7766         case ETH_TUNNEL_FILTER_OIP:
7767                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7768                 break;
7769         case ETH_TUNNEL_FILTER_IIP:
7770                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7771                 break;
7772         default:
7773                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7774                 return -EINVAL;
7775         }
7776
7777         return 0;
7778 }
7779
/* Convert tunnel filter structure */
/**
 * Translate an AQ cloud-filter element into the driver's software
 * tunnel filter representation (@tunnel_filter).  Copies the outer and
 * inner MACs, inner VLAN, flags, tenant id, queue number and general
 * fields; derives ip_type from the IPV6 flag bit.  Always returns 0.
 */
static int
i40e_tunnel_filter_convert(
	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
	struct i40e_tunnel_filter *tunnel_filter)
{
	rte_ether_addr_copy((struct rte_ether_addr *)
			&cld_filter->element.outer_mac,
		(struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
	rte_ether_addr_copy((struct rte_ether_addr *)
			&cld_filter->element.inner_mac,
		(struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
	tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
	/* flags field is little-endian on the wire; test the IPv6 bit. */
	if ((rte_le_to_cpu_16(cld_filter->element.flags) &
	     I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
	    I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
	else
		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
	/* flags are stored as-is (still little-endian). */
	tunnel_filter->input.flags = cld_filter->element.flags;
	tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
	tunnel_filter->queue = cld_filter->element.queue_number;
	rte_memcpy(tunnel_filter->input.general_fields,
		   cld_filter->general_fields,
		   sizeof(cld_filter->general_fields));

	return 0;
}
7808
7809 /* Check if there exists the tunnel filter */
7810 struct i40e_tunnel_filter *
7811 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7812                              const struct i40e_tunnel_filter_input *input)
7813 {
7814         int ret;
7815
7816         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7817         if (ret < 0)
7818                 return NULL;
7819
7820         return tunnel_rule->hash_map[ret];
7821 }
7822
7823 /* Add a tunnel filter into the SW list */
7824 static int
7825 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7826                              struct i40e_tunnel_filter *tunnel_filter)
7827 {
7828         struct i40e_tunnel_rule *rule = &pf->tunnel;
7829         int ret;
7830
7831         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7832         if (ret < 0) {
7833                 PMD_DRV_LOG(ERR,
7834                             "Failed to insert tunnel filter to hash table %d!",
7835                             ret);
7836                 return ret;
7837         }
7838         rule->hash_map[ret] = tunnel_filter;
7839
7840         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7841
7842         return 0;
7843 }
7844
7845 /* Delete a tunnel filter from the SW list */
7846 int
7847 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7848                           struct i40e_tunnel_filter_input *input)
7849 {
7850         struct i40e_tunnel_rule *rule = &pf->tunnel;
7851         struct i40e_tunnel_filter *tunnel_filter;
7852         int ret;
7853
7854         ret = rte_hash_del_key(rule->hash_table, input);
7855         if (ret < 0) {
7856                 PMD_DRV_LOG(ERR,
7857                             "Failed to delete tunnel filter to hash table %d!",
7858                             ret);
7859                 return ret;
7860         }
7861         tunnel_filter = rule->hash_map[ret];
7862         rule->hash_map[ret] = NULL;
7863
7864         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7865         rte_free(tunnel_filter);
7866
7867         return 0;
7868 }
7869
7870 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7871 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7872 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7873 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7874 #define I40E_TR_GRE_KEY_MASK                    0x400
7875 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7876 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7877 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
7878 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
7879 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
7880 #define I40E_DIRECTION_INGRESS_KEY              0x8000
7881 #define I40E_TR_L4_TYPE_TCP                     0x2
7882 #define I40E_TR_L4_TYPE_UDP                     0x4
7883 #define I40E_TR_L4_TYPE_SCTP                    0x8
7884
/**
 * Replace the default L1 filter with one suitable for MPLS matching
 * (firmware "replace cloud filters" admin command, L1 program 0x11).
 *
 * Not supported in multi-driver mode (the replacement is global to the
 * device, so it could break other drivers sharing the port).
 *
 * Returns the AQ status; warns when firmware reports the filter type
 * actually changed.
 */
static enum
i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
{
	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
	enum i40e_status_code status = I40E_SUCCESS;

	if (pf->support_multi_driver) {
		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
		return I40E_NOT_SUPPORTED;
	}

	memset(&filter_replace, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
	memset(&filter_replace_buf, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

	/* create L1 filter */
	filter_replace.old_filter_type =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
	filter_replace.tr_bit = 0;

	/* Prepare the buffer, 3 entries */
	/* Entry 1: TEID word 0, bytes 2-3 fully masked. */
	filter_replace_buf.data[0] =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
	filter_replace_buf.data[0] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[2] = 0xFF;
	filter_replace_buf.data[3] = 0xFF;
	/* Entry 2: TEID word 1, high nibble of byte 7 masked. */
	filter_replace_buf.data[4] =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
	filter_replace_buf.data[4] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[7] = 0xF0;
	/* Entry 3: TR word with the tunnel-key mask bits (bytes 10-11). */
	filter_replace_buf.data[8]
		= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
	filter_replace_buf.data[8] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
		I40E_TR_GENEVE_KEY_MASK |
		I40E_TR_GENERIC_UDP_TUNNEL_MASK;
	filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
		I40E_TR_GRE_KEY_WITH_XSUM_MASK |
		I40E_TR_GRE_NO_KEY_MASK) >> 8;

	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
					       &filter_replace_buf);
	if (!status && (filter_replace.old_filter_type !=
			filter_replace.new_filter_type))
		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
			    " original: 0x%x, new: 0x%x",
			    dev->device->name,
			    filter_replace.old_filter_type,
			    filter_replace.new_filter_type);

	return status;
}
7945
/**
 * Replace two default cloud filters with MPLS-capable ones:
 * IIP -> filter 0x11 for MPLSoUDP and IMAC -> filter 0x12 for MPLSoGRE,
 * both built on the L1 filter 0x11 installed by
 * i40e_replace_mpls_l1_filter().
 *
 * Not supported in multi-driver mode.  Returns the AQ status of the
 * last command; warns when firmware reports the filter type changed.
 */
static enum
i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
{
	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
	enum i40e_status_code status = I40E_SUCCESS;

	if (pf->support_multi_driver) {
		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
		return I40E_NOT_SUPPORTED;
	}

	/* For MPLSoUDP */
	memset(&filter_replace, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
	memset(&filter_replace_buf, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
		I40E_AQC_MIRROR_CLOUD_FILTER;
	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
	filter_replace.new_filter_type =
		I40E_AQC_ADD_CLOUD_FILTER_0X11;
	/* Prepare the buffer, 2 entries */
	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
	filter_replace_buf.data[0] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
	filter_replace_buf.data[4] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
					       &filter_replace_buf);
	/* Abort before the second replace if the first one failed. */
	if (status < 0)
		return status;
	if (filter_replace.old_filter_type !=
	    filter_replace.new_filter_type)
		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
			    " original: 0x%x, new: 0x%x",
			    dev->device->name,
			    filter_replace.old_filter_type,
			    filter_replace.new_filter_type);

	/* For MPLSoGRE */
	memset(&filter_replace, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
	memset(&filter_replace_buf, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
		I40E_AQC_MIRROR_CLOUD_FILTER;
	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
	filter_replace.new_filter_type =
		I40E_AQC_ADD_CLOUD_FILTER_0X12;
	/* Prepare the buffer, 2 entries */
	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
	filter_replace_buf.data[0] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
	filter_replace_buf.data[4] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;

	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
					       &filter_replace_buf);
	if (!status && (filter_replace.old_filter_type !=
			filter_replace.new_filter_type))
		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
			    " original: 0x%x, new: 0x%x",
			    dev->device->name,
			    filter_replace.old_filter_type,
			    filter_replace.new_filter_type);

	return status;
}
8020
/**
 * Replace two default L1 filters with GTP-capable ones:
 * IMAC -> L1 filter 0x12 (GTP-C) and TUNNLE_KEY -> L1 filter 0x13
 * (GTP-U), each matching on TEID words 0 and 1.
 *
 * Not supported in multi-driver mode.  Returns the AQ status of the
 * last command; warns when firmware reports the filter type changed.
 */
static enum i40e_status_code
i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
{
	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
	enum i40e_status_code status = I40E_SUCCESS;

	if (pf->support_multi_driver) {
		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
		return I40E_NOT_SUPPORTED;
	}

	/* For GTP-C */
	memset(&filter_replace, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
	memset(&filter_replace_buf, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
	/* create L1 filter */
	filter_replace.old_filter_type =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	/* Prepare the buffer, 2 entries */
	/* Both TEID words fully masked (bytes 2-3 and 6-7 = 0xFF). */
	filter_replace_buf.data[0] =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
	filter_replace_buf.data[0] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[2] = 0xFF;
	filter_replace_buf.data[3] = 0xFF;
	filter_replace_buf.data[4] =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
	filter_replace_buf.data[4] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[6] = 0xFF;
	filter_replace_buf.data[7] = 0xFF;
	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
					       &filter_replace_buf);
	/* Abort before the GTP-U replace if the GTP-C one failed. */
	if (status < 0)
		return status;
	if (filter_replace.old_filter_type !=
	    filter_replace.new_filter_type)
		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
			    " original: 0x%x, new: 0x%x",
			    dev->device->name,
			    filter_replace.old_filter_type,
			    filter_replace.new_filter_type);

	/* for GTP-U */
	memset(&filter_replace, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
	memset(&filter_replace_buf, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
	/* create L1 filter */
	filter_replace.old_filter_type =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	/* Prepare the buffer, 2 entries */
	filter_replace_buf.data[0] =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
	filter_replace_buf.data[0] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[2] = 0xFF;
	filter_replace_buf.data[3] = 0xFF;
	filter_replace_buf.data[4] =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
	filter_replace_buf.data[4] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[6] = 0xFF;
	filter_replace_buf.data[7] = 0xFF;

	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
					       &filter_replace_buf);
	if (!status && (filter_replace.old_filter_type !=
			filter_replace.new_filter_type))
		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
			    " original: 0x%x, new: 0x%x",
			    dev->device->name,
			    filter_replace.old_filter_type,
			    filter_replace.new_filter_type);

	return status;
}
8108
/*
 * Replace the firmware's default cloud filter types with the custom types
 * used for GTP tunnel matching: IMAC_IVLAN is replaced by type 0x11
 * (used for GTP-C) and IMAC_IVLAN_TEN_ID by type 0x12 (used for GTP-U).
 *
 * @pf: pointer to the PF control structure
 *
 * Return: I40E_SUCCESS on success, I40E_NOT_SUPPORTED when the device is
 * shared with other drivers, or the admin-queue status code on failure.
 */
static enum
i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
{
	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
	enum i40e_status_code status = I40E_SUCCESS;

	/* Replacing cloud filters alters device-global firmware state,
	 * which is refused when several drivers share the same NIC.
	 */
	if (pf->support_multi_driver) {
		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
		return I40E_NOT_SUPPORTED;
	}

	/* for GTP-C */
	memset(&filter_replace, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
	memset(&filter_replace_buf, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
	filter_replace.new_filter_type =
		I40E_AQC_ADD_CLOUD_FILTER_0X11;
	/* Prepare the buffer, 2 entries */
	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
	filter_replace_buf.data[0] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
	filter_replace_buf.data[4] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
					       &filter_replace_buf);
	if (status < 0)
		return status;
	/* Warn when firmware reports the filter type actually changed */
	if (filter_replace.old_filter_type !=
	    filter_replace.new_filter_type)
		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
			    " original: 0x%x, new: 0x%x",
			    dev->device->name,
			    filter_replace.old_filter_type,
			    filter_replace.new_filter_type);

	/* for GTP-U */
	memset(&filter_replace, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
	memset(&filter_replace_buf, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
	filter_replace.old_filter_type =
		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
	filter_replace.new_filter_type =
		I40E_AQC_ADD_CLOUD_FILTER_0X12;
	/* Prepare the buffer, 2 entries */
	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
	filter_replace_buf.data[0] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
	filter_replace_buf.data[4] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;

	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
					       &filter_replace_buf);
	if (!status && (filter_replace.old_filter_type !=
			filter_replace.new_filter_type))
		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
			    " original: 0x%x, new: 0x%x",
			    dev->device->name,
			    filter_replace.old_filter_type,
			    filter_replace.new_filter_type);

	return status;
}
8181
/*
 * Replace an L1 filter type so that L4 source or destination port tunnel
 * rules can be matched: for source-port rules, FV_TUNNLE_KEY is replaced by
 * L1 filter type 0x11; for destination-port rules, FV_STAG_IVLAN is
 * replaced by L1 filter type 0x10.
 *
 * @pf:           pointer to the PF control structure
 * @l4_port_type: I40E_L4_PORT_TYPE_SRC or I40E_L4_PORT_TYPE_DST
 *
 * Return: I40E_SUCCESS on success, I40E_NOT_SUPPORTED when the device is
 * shared with other drivers, or the admin-queue status code on failure.
 */
static enum i40e_status_code
i40e_replace_port_l1_filter(struct i40e_pf *pf,
			    enum i40e_l4_port_type l4_port_type)
{
	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];

	/* Filter replacement is device-global; refuse it in
	 * multi-driver mode.
	 */
	if (pf->support_multi_driver) {
		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
		return I40E_NOT_SUPPORTED;
	}

	memset(&filter_replace, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
	memset(&filter_replace_buf, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

	/* create L1 filter */
	if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
		filter_replace.old_filter_type =
			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
		filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
		filter_replace_buf.data[8] =
			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
	} else {
		filter_replace.old_filter_type =
			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
		filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
		filter_replace_buf.data[8] =
			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
	}

	filter_replace.tr_bit = 0;
	/* Prepare the buffer, 3 entries */
	filter_replace_buf.data[0] =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
	filter_replace_buf.data[0] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[2] = 0x00;
	filter_replace_buf.data[3] =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
	filter_replace_buf.data[4] =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
	filter_replace_buf.data[4] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[5] = 0x00;
	/* Match any of the three L4 protocol types */
	filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
		I40E_TR_L4_TYPE_TCP |
		I40E_TR_L4_TYPE_SCTP;
	filter_replace_buf.data[7] = 0x00;
	filter_replace_buf.data[8] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[9] = 0x00;
	filter_replace_buf.data[10] = 0xFF;
	filter_replace_buf.data[11] = 0xFF;

	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
					       &filter_replace_buf);
	/* Warn only on success when firmware reports a real type change */
	if (!status && filter_replace.old_filter_type !=
	    filter_replace.new_filter_type)
		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
			    " original: 0x%x, new: 0x%x",
			    dev->device->name,
			    filter_replace.old_filter_type,
			    filter_replace.new_filter_type);

	return status;
}
8253
/*
 * Replace a cloud filter type so that L4 source or destination port tunnel
 * rules can be used: for source-port rules, the IIP filter type is replaced
 * by cloud filter type 0x11; for destination-port rules, the OIP filter
 * type is replaced by cloud filter type 0x10.
 *
 * @pf:           pointer to the PF control structure
 * @l4_port_type: I40E_L4_PORT_TYPE_SRC or I40E_L4_PORT_TYPE_DST
 *
 * Return: I40E_SUCCESS on success, I40E_NOT_SUPPORTED when the device is
 * shared with other drivers, or the admin-queue status code on failure.
 */
static enum i40e_status_code
i40e_replace_port_cloud_filter(struct i40e_pf *pf,
			       enum i40e_l4_port_type l4_port_type)
{
	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];

	/* Filter replacement is device-global; refuse it in
	 * multi-driver mode.
	 */
	if (pf->support_multi_driver) {
		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
		return I40E_NOT_SUPPORTED;
	}

	memset(&filter_replace, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
	memset(&filter_replace_buf, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

	if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
		filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
		filter_replace.new_filter_type =
			I40E_AQC_ADD_CLOUD_FILTER_0X11;
		filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
	} else {
		filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
		filter_replace.new_filter_type =
			I40E_AQC_ADD_CLOUD_FILTER_0X10;
		filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
	}

	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
	filter_replace.tr_bit = 0;
	/* Prepare the buffer, 2 entries */
	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
	filter_replace_buf.data[0] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[4] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
					       &filter_replace_buf);

	/* Warn only on success when firmware reports a real type change */
	if (!status && filter_replace.old_filter_type !=
	    filter_replace.new_filter_type)
		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
			    " original: 0x%x, new: 0x%x",
			    dev->device->name,
			    filter_replace.old_filter_type,
			    filter_replace.new_filter_type);

	return status;
}
8307
/*
 * Add or delete a tunnel (cloud) filter in both hardware and the driver's
 * software list, keeping the two consistent.
 *
 * @pf:            pointer to the PF control structure
 * @tunnel_filter: filter description (tunnel type, addresses, VLANs,
 *                 tenant id, destination queue, optional target VF)
 * @add:           non-zero to add the filter, zero to delete it
 *
 * Builds an admin-queue cloud filter element from @tunnel_filter. For
 * tunnel types that need extra "general fields" (MPLS, GTP, QinQ, L4
 * port types) the big-buffer admin-queue variant is used, and the
 * corresponding filter-type replacement is performed once per PF (guarded
 * by the *_replace_flag members).
 *
 * Return: 0 on success; -ENOMEM on allocation failure; -EINVAL on invalid
 * arguments or SW-list conflicts; -ENOTSUP when the admin-queue command
 * fails.
 */
int
i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
		      struct i40e_tunnel_filter_conf *tunnel_filter,
		      uint8_t add)
{
	uint16_t ip_type;
	uint32_t ipv4_addr, ipv4_addr_le;
	uint8_t i, tun_type = 0;
	/* internal variable to convert ipv6 byte order */
	uint32_t convert_ipv6[4];
	int val, ret = 0;
	struct i40e_pf_vf *vf = NULL;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	struct i40e_aqc_cloud_filters_element_bb *cld_filter;
	struct i40e_aqc_cloud_filters_element_bb *pfilter;
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	struct i40e_tunnel_filter *tunnel, *node;
	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
	uint32_t teid_le;
	bool big_buffer = 0;

	/* Allocated zeroed, so untouched fields/flags start at 0;
	 * freed on every return path below.
	 */
	cld_filter = rte_zmalloc("tunnel_filter",
			 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
			 0);

	if (cld_filter == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
		return -ENOMEM;
	}
	pfilter = cld_filter;

	rte_ether_addr_copy(&tunnel_filter->outer_mac,
			(struct rte_ether_addr *)&pfilter->element.outer_mac);
	rte_ether_addr_copy(&tunnel_filter->inner_mac,
			(struct rte_ether_addr *)&pfilter->element.inner_mac);

	pfilter->element.inner_vlan =
		rte_cpu_to_le_16(tunnel_filter->inner_vlan);
	/* Convert the IP address from big-endian to the little-endian
	 * layout the admin queue expects.
	 */
	if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
		ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
		rte_memcpy(&pfilter->element.ipaddr.v4.data,
				&ipv4_addr_le,
				sizeof(pfilter->element.ipaddr.v4.data));
	} else {
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
		for (i = 0; i < 4; i++) {
			convert_ipv6[i] =
			rte_cpu_to_le_32(rte_be_to_cpu_32(
					 tunnel_filter->ip_addr.ipv6_addr[i]));
		}
		rte_memcpy(&pfilter->element.ipaddr.v6.data,
			   &convert_ipv6,
			   sizeof(pfilter->element.ipaddr.v6.data));
	}

	/* check tunneled type */
	switch (tunnel_filter->tunnel_type) {
	case I40E_TUNNEL_TYPE_VXLAN:
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
		break;
	case I40E_TUNNEL_TYPE_NVGRE:
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
		break;
	case I40E_TUNNEL_TYPE_IP_IN_GRE:
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
		break;
	case I40E_TUNNEL_TYPE_MPLSoUDP:
		/* One-time per-PF replacement of the MPLS filter types */
		if (!pf->mpls_replace_flag) {
			i40e_replace_mpls_l1_filter(pf);
			i40e_replace_mpls_cloud_filter(pf);
			pf->mpls_replace_flag = 1;
		}
		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
		/* MPLS label: 20 bits split over two general-field words */
		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
			teid_le >> 4;
		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
			(teid_le & 0xF) << 12;
		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
			0x40;
		big_buffer = 1;
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
		break;
	case I40E_TUNNEL_TYPE_MPLSoGRE:
		if (!pf->mpls_replace_flag) {
			i40e_replace_mpls_l1_filter(pf);
			i40e_replace_mpls_cloud_filter(pf);
			pf->mpls_replace_flag = 1;
		}
		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
			teid_le >> 4;
		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
			(teid_le & 0xF) << 12;
		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
			0x0;
		big_buffer = 1;
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
		break;
	case I40E_TUNNEL_TYPE_GTPC:
		/* One-time per-PF replacement of the GTP filter types */
		if (!pf->gtp_replace_flag) {
			i40e_replace_gtp_l1_filter(pf);
			i40e_replace_gtp_cloud_filter(pf);
			pf->gtp_replace_flag = 1;
		}
		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
		/* GTP TEID: 32 bits split over two 16-bit words */
		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
			(teid_le >> 16) & 0xFFFF;
		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
			teid_le & 0xFFFF;
		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
			0x0;
		big_buffer = 1;
		break;
	case I40E_TUNNEL_TYPE_GTPU:
		if (!pf->gtp_replace_flag) {
			i40e_replace_gtp_l1_filter(pf);
			i40e_replace_gtp_cloud_filter(pf);
			pf->gtp_replace_flag = 1;
		}
		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
			(teid_le >> 16) & 0xFFFF;
		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
			teid_le & 0xFFFF;
		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
			0x0;
		big_buffer = 1;
		break;
	case I40E_TUNNEL_TYPE_QINQ:
		if (!pf->qinq_replace_flag) {
			ret = i40e_cloud_filter_qinq_create(pf);
			if (ret < 0)
				PMD_DRV_LOG(DEBUG,
					    "QinQ tunnel filter already created.");
			pf->qinq_replace_flag = 1;
		}
		/*	Add in the General fields the values of
		 *	the Outer and Inner VLAN
		 *	Big Buffer should be set, see changes in
		 *	i40e_aq_add_cloud_filters
		 */
		pfilter->general_fields[0] = tunnel_filter->inner_vlan;
		pfilter->general_fields[1] = tunnel_filter->outer_vlan;
		big_buffer = 1;
		break;
	case I40E_CLOUD_TYPE_UDP:
	case I40E_CLOUD_TYPE_TCP:
	case I40E_CLOUD_TYPE_SCTP:
		if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
			/* One-time per-PF replacement for source-port rules */
			if (!pf->sport_replace_flag) {
				i40e_replace_port_l1_filter(pf,
						tunnel_filter->l4_port_type);
				i40e_replace_port_cloud_filter(pf,
						tunnel_filter->l4_port_type);
				pf->sport_replace_flag = 1;
			}
			teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
				I40E_DIRECTION_INGRESS_KEY;

			/* Select the L4 protocol key for this rule */
			if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
					I40E_TR_L4_TYPE_UDP;
			else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
					I40E_TR_L4_TYPE_TCP;
			else
				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
					I40E_TR_L4_TYPE_SCTP;

			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
				(teid_le >> 16) & 0xFFFF;
			big_buffer = 1;
		} else {
			/* One-time per-PF replacement for dest-port rules */
			if (!pf->dport_replace_flag) {
				i40e_replace_port_l1_filter(pf,
						tunnel_filter->l4_port_type);
				i40e_replace_port_cloud_filter(pf,
						tunnel_filter->l4_port_type);
				pf->dport_replace_flag = 1;
			}
			teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
				I40E_DIRECTION_INGRESS_KEY;

			if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
					I40E_TR_L4_TYPE_UDP;
			else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
					I40E_TR_L4_TYPE_TCP;
			else
				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
					I40E_TR_L4_TYPE_SCTP;

			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
				(teid_le >> 16) & 0xFFFF;
			big_buffer = 1;
		}

		break;
	default:
		/* Other tunnel types is not supported. */
		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
		rte_free(cld_filter);
		return -EINVAL;
	}

	/* Map the tunnel type to the (possibly replaced) cloud filter type */
	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
		pfilter->element.flags =
			I40E_AQC_ADD_CLOUD_FILTER_0X11;
	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
		pfilter->element.flags =
			I40E_AQC_ADD_CLOUD_FILTER_0X12;
	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
		pfilter->element.flags =
			I40E_AQC_ADD_CLOUD_FILTER_0X11;
	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
		pfilter->element.flags =
			I40E_AQC_ADD_CLOUD_FILTER_0X12;
	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
		pfilter->element.flags |=
			I40E_AQC_ADD_CLOUD_FILTER_0X10;
	else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
		 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
		 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
		if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
			pfilter->element.flags |=
				I40E_AQC_ADD_CLOUD_FILTER_0X11;
		else
			pfilter->element.flags |=
				I40E_AQC_ADD_CLOUD_FILTER_0X10;
	} else {
		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
						&pfilter->element.flags);
		if (val < 0) {
			rte_free(cld_filter);
			return -EINVAL;
		}
	}

	pfilter->element.flags |= rte_cpu_to_le_16(
		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
	pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
	pfilter->element.queue_number =
		rte_cpu_to_le_16(tunnel_filter->queue_id);

	/* Pick the target VSI: main VSI, or the VSI of the requested VF */
	if (!tunnel_filter->is_to_vf)
		vsi = pf->main_vsi;
	else {
		if (tunnel_filter->vf_id >= pf->vf_num) {
			PMD_DRV_LOG(ERR, "Invalid argument.");
			rte_free(cld_filter);
			return -EINVAL;
		}
		vf = &pf->vfs[tunnel_filter->vf_id];
		vsi = vf->vsi;
	}

	/* Check if there is the filter in SW list */
	memset(&check_filter, 0, sizeof(check_filter));
	i40e_tunnel_filter_convert(cld_filter, &check_filter);
	check_filter.is_to_vf = tunnel_filter->is_to_vf;
	check_filter.vf_id = tunnel_filter->vf_id;
	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
	if (add && node) {
		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
		rte_free(cld_filter);
		return -EINVAL;
	}

	if (!add && !node) {
		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
		rte_free(cld_filter);
		return -EINVAL;
	}

	if (add) {
		/* Big-buffer variant carries the extra general fields */
		if (big_buffer)
			ret = i40e_aq_add_cloud_filters_bb(hw,
						   vsi->seid, cld_filter, 1);
		else
			ret = i40e_aq_add_cloud_filters(hw,
					vsi->seid, &cld_filter->element, 1);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
			rte_free(cld_filter);
			return -ENOTSUP;
		}
		tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
		if (tunnel == NULL) {
			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
			rte_free(cld_filter);
			return -ENOMEM;
		}

		/* Mirror the HW filter into the SW list; on insert failure
		 * only the SW node is released.
		 */
		rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
		if (ret < 0)
			rte_free(tunnel);
	} else {
		if (big_buffer)
			ret = i40e_aq_rem_cloud_filters_bb(
				hw, vsi->seid, cld_filter, 1);
		else
			ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
						&cld_filter->element, 1);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
			rte_free(cld_filter);
			return -ENOTSUP;
		}
		ret = i40e_sw_tunnel_filter_del(pf, &node->input);
	}

	rte_free(cld_filter);
	return ret;
}
8630
8631 static int
8632 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8633 {
8634         uint8_t i;
8635
8636         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8637                 if (pf->vxlan_ports[i] == port)
8638                         return i;
8639         }
8640
8641         return -1;
8642 }
8643
8644 static int
8645 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
8646 {
8647         int  idx, ret;
8648         uint8_t filter_idx = 0;
8649         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8650
8651         idx = i40e_get_vxlan_port_idx(pf, port);
8652
8653         /* Check if port already exists */
8654         if (idx >= 0) {
8655                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8656                 return -EINVAL;
8657         }
8658
8659         /* Now check if there is space to add the new port */
8660         idx = i40e_get_vxlan_port_idx(pf, 0);
8661         if (idx < 0) {
8662                 PMD_DRV_LOG(ERR,
8663                         "Maximum number of UDP ports reached, not adding port %d",
8664                         port);
8665                 return -ENOSPC;
8666         }
8667
8668         ret =  i40e_aq_add_udp_tunnel(hw, port, udp_type,
8669                                         &filter_idx, NULL);
8670         if (ret < 0) {
8671                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8672                 return -1;
8673         }
8674
8675         PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
8676                          port,  filter_idx);
8677
8678         /* New port: add it and mark its index in the bitmap */
8679         pf->vxlan_ports[idx] = port;
8680         pf->vxlan_bitmap |= (1 << idx);
8681
8682         if (!(pf->flags & I40E_FLAG_VXLAN))
8683                 pf->flags |= I40E_FLAG_VXLAN;
8684
8685         return 0;
8686 }
8687
8688 static int
8689 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8690 {
8691         int idx;
8692         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8693
8694         if (!(pf->flags & I40E_FLAG_VXLAN)) {
8695                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8696                 return -EINVAL;
8697         }
8698
8699         idx = i40e_get_vxlan_port_idx(pf, port);
8700
8701         if (idx < 0) {
8702                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8703                 return -EINVAL;
8704         }
8705
8706         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8707                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8708                 return -1;
8709         }
8710
8711         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
8712                         port, idx);
8713
8714         pf->vxlan_ports[idx] = 0;
8715         pf->vxlan_bitmap &= ~(1 << idx);
8716
8717         if (!pf->vxlan_bitmap)
8718                 pf->flags &= ~I40E_FLAG_VXLAN;
8719
8720         return 0;
8721 }
8722
8723 /* Add UDP tunneling port */
8724 static int
8725 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8726                              struct rte_eth_udp_tunnel *udp_tunnel)
8727 {
8728         int ret = 0;
8729         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8730
8731         if (udp_tunnel == NULL)
8732                 return -EINVAL;
8733
8734         switch (udp_tunnel->prot_type) {
8735         case RTE_TUNNEL_TYPE_VXLAN:
8736                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8737                                           I40E_AQC_TUNNEL_TYPE_VXLAN);
8738                 break;
8739         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8740                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8741                                           I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
8742                 break;
8743         case RTE_TUNNEL_TYPE_GENEVE:
8744         case RTE_TUNNEL_TYPE_TEREDO:
8745                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8746                 ret = -1;
8747                 break;
8748
8749         default:
8750                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8751                 ret = -1;
8752                 break;
8753         }
8754
8755         return ret;
8756 }
8757
8758 /* Remove UDP tunneling port */
8759 static int
8760 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8761                              struct rte_eth_udp_tunnel *udp_tunnel)
8762 {
8763         int ret = 0;
8764         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8765
8766         if (udp_tunnel == NULL)
8767                 return -EINVAL;
8768
8769         switch (udp_tunnel->prot_type) {
8770         case RTE_TUNNEL_TYPE_VXLAN:
8771         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8772                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8773                 break;
8774         case RTE_TUNNEL_TYPE_GENEVE:
8775         case RTE_TUNNEL_TYPE_TEREDO:
8776                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8777                 ret = -1;
8778                 break;
8779         default:
8780                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8781                 ret = -1;
8782                 break;
8783         }
8784
8785         return ret;
8786 }
8787
8788 /* Calculate the maximum number of contiguous PF queues that are configured */
8789 int
8790 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8791 {
8792         struct rte_eth_dev_data *data = pf->dev_data;
8793         int i, num;
8794         struct i40e_rx_queue *rxq;
8795
8796         num = 0;
8797         for (i = 0; i < pf->lan_nb_qps; i++) {
8798                 rxq = data->rx_queues[i];
8799                 if (rxq && rxq->q_set)
8800                         num++;
8801                 else
8802                         break;
8803         }
8804
8805         return num;
8806 }
8807
/* Reset the global configure of hash function and input sets */
static void
i40e_pf_global_rss_reset(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t reg, reg_val;
        int i;

        /* Reset global RSS function sets */
        reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
        if (!(reg_val & I40E_GLQF_CTL_HTOEP_MASK)) {
                /* Re-enable the Toeplitz hash function bit if cleared. */
                reg_val |= I40E_GLQF_CTL_HTOEP_MASK;
                i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg_val);
        }

        for (i = 0; i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) {
                uint64_t inset;
                int j, pctype;

                /* X722 indirects the PCTYPE through GLQF_FD_PCTYPES;
                 * other MAC types use the loop index directly.
                 */
                if (hw->mac.type == I40E_MAC_X722)
                        pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(i));
                else
                        pctype = i;

                /* Reset pctype insets */
                inset = i40e_get_default_input_set(i);
                if (inset) {
                        pf->hash_input_set[pctype] = inset;
                        inset = i40e_translate_input_set_reg(hw->mac.type,
                                                             inset);

                        /* Program the 64-bit input set as two 32-bit
                         * register halves (index 0 = low, 1 = high).
                         */
                        reg = I40E_GLQF_HASH_INSET(0, pctype);
                        i40e_check_write_global_reg(hw, reg, (uint32_t)inset);
                        reg = I40E_GLQF_HASH_INSET(1, pctype);
                        i40e_check_write_global_reg(hw, reg,
                                                    (uint32_t)(inset >> 32));

                        /* Clear unused mask registers of the pctype */
                        for (j = 0; j < I40E_INSET_MASK_NUM_REG; j++) {
                                reg = I40E_GLQF_HASH_MSK(j, pctype);
                                i40e_check_write_global_reg(hw, reg, 0);
                        }
                }

                /* Reset pctype symmetric sets */
                reg = I40E_GLQF_HSYM(pctype);
                reg_val = i40e_read_rx_ctl(hw, reg);
                if (reg_val & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
                        reg_val &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
                        i40e_write_global_rx_ctl(hw, reg, reg_val);
                }
        }
        /* Single flush after all register updates. */
        I40E_WRITE_FLUSH(hw);
}
8862
8863 int
8864 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
8865 {
8866         struct i40e_hw *hw = &pf->adapter->hw;
8867         uint8_t lut[ETH_RSS_RETA_SIZE_512];
8868         uint32_t i;
8869         int num;
8870
8871         /* If both VMDQ and RSS enabled, not all of PF queues are
8872          * configured. It's necessary to calculate the actual PF
8873          * queues that are configured.
8874          */
8875         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8876                 num = i40e_pf_calc_configured_queues_num(pf);
8877         else
8878                 num = pf->dev_data->nb_rx_queues;
8879
8880         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8881         if (num <= 0)
8882                 return 0;
8883
8884         for (i = 0; i < hw->func_caps.rss_table_size; i++)
8885                 lut[i] = (uint8_t)(i % (uint32_t)num);
8886
8887         return i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
8888 }
8889
8890 int
8891 i40e_pf_reset_rss_key(struct i40e_pf *pf)
8892 {
8893         const uint8_t key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8894                         sizeof(uint32_t);
8895         uint8_t *rss_key;
8896
8897         /* Reset key */
8898         rss_key = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key;
8899         if (!rss_key ||
8900             pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key_len < key_len) {
8901                 static uint32_t rss_key_default[] = {0x6b793944,
8902                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8903                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8904                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8905
8906                 rss_key = (uint8_t *)rss_key_default;
8907         }
8908
8909         return i40e_set_rss_key(pf->main_vsi, rss_key, key_len);
8910 }
8911
8912 static int
8913 i40e_pf_rss_reset(struct i40e_pf *pf)
8914 {
8915         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8916
8917         int ret;
8918
8919         pf->hash_filter_enabled = 0;
8920         i40e_pf_disable_rss(pf);
8921         i40e_set_symmetric_hash_enable_per_port(hw, 0);
8922
8923         if (!pf->support_multi_driver)
8924                 i40e_pf_global_rss_reset(pf);
8925
8926         /* Reset RETA table */
8927         if (pf->adapter->rss_reta_updated == 0) {
8928                 ret = i40e_pf_reset_rss_reta(pf);
8929                 if (ret)
8930                         return ret;
8931         }
8932
8933         return i40e_pf_reset_rss_key(pf);
8934 }
8935
8936 /* Configure RSS */
8937 int
8938 i40e_pf_config_rss(struct i40e_pf *pf)
8939 {
8940         struct i40e_hw *hw;
8941         enum rte_eth_rx_mq_mode mq_mode;
8942         uint64_t rss_hf, hena;
8943         int ret;
8944
8945         ret = i40e_pf_rss_reset(pf);
8946         if (ret) {
8947                 PMD_DRV_LOG(ERR, "Reset RSS failed, RSS has been disabled");
8948                 return ret;
8949         }
8950
8951         rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
8952         mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8953         if (!(rss_hf & pf->adapter->flow_types_mask) ||
8954             !(mq_mode & ETH_MQ_RX_RSS_FLAG))
8955                 return 0;
8956
8957         hw = I40E_PF_TO_HW(pf);
8958         hena = i40e_config_hena(pf->adapter, rss_hf);
8959         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
8960         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
8961         I40E_WRITE_FLUSH(hw);
8962
8963         return 0;
8964 }
8965
8966 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8967 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
8968 int
8969 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8970 {
8971         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8972         uint32_t val, reg;
8973         int ret = -EINVAL;
8974
8975         if (pf->support_multi_driver) {
8976                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8977                 return -ENOTSUP;
8978         }
8979
8980         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8981         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8982
8983         if (len == 3) {
8984                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8985         } else if (len == 4) {
8986                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8987         } else {
8988                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8989                 return ret;
8990         }
8991
8992         if (reg != val) {
8993                 ret = i40e_aq_debug_write_global_register(hw,
8994                                                    I40E_GL_PRS_FVBM(2),
8995                                                    reg, NULL);
8996                 if (ret != 0)
8997                         return ret;
8998                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
8999                             "with value 0x%08x",
9000                             I40E_GL_PRS_FVBM(2), reg);
9001         } else {
9002                 ret = 0;
9003         }
9004         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
9005                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
9006
9007         return ret;
9008 }
9009
9010 /* Set the symmetric hash enable configurations per port */
9011 void
9012 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
9013 {
9014         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9015
9016         if (enable > 0) {
9017                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)
9018                         return;
9019
9020                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9021         } else {
9022                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK))
9023                         return;
9024
9025                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9026         }
9027         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
9028         I40E_WRITE_FLUSH(hw);
9029 }
9030
9031 /**
9032  * Valid input sets for hash and flow director filters per PCTYPE
9033  */
9034 static uint64_t
9035 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
9036                 enum rte_filter_type filter)
9037 {
9038         uint64_t valid;
9039
9040         static const uint64_t valid_hash_inset_table[] = {
9041                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9042                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9043                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9044                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
9045                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
9046                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9047                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9048                         I40E_INSET_FLEX_PAYLOAD,
9049                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9050                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9051                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9052                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9053                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9054                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9055                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9056                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9057                         I40E_INSET_FLEX_PAYLOAD,
9058                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9059                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9060                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9061                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9062                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9063                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9064                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9065                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9066                         I40E_INSET_FLEX_PAYLOAD,
9067                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9068                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9069                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9070                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9071                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9072                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9073                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9074                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9075                         I40E_INSET_FLEX_PAYLOAD,
9076                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9077                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9078                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9079                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9080                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9081                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9082                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9083                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9084                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9085                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9086                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9087                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9088                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9089                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9090                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9091                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9092                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9093                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9094                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9095                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9096                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9097                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9098                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9099                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9100                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9101                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9102                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9103                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9104                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9105                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9106                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9107                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9108                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9109                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9110                         I40E_INSET_FLEX_PAYLOAD,
9111                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9112                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9113                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9114                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9115                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9116                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9117                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9118                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9119                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9120                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9121                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9122                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9123                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9124                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9125                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9126                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9127                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9128                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9129                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9130                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9131                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9132                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9133                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9134                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9135                         I40E_INSET_FLEX_PAYLOAD,
9136                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9137                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9138                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9139                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9140                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9141                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9142                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9143                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9144                         I40E_INSET_FLEX_PAYLOAD,
9145                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9146                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9147                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9148                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9149                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9150                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9151                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9152                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9153                         I40E_INSET_FLEX_PAYLOAD,
9154                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9155                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9156                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9157                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9158                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9159                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9160                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9161                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9162                         I40E_INSET_FLEX_PAYLOAD,
9163                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9164                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9165                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9166                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9167                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9168                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9169                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9170                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9171                         I40E_INSET_FLEX_PAYLOAD,
9172                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9173                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9174                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9175                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9176                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9177                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9178                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9179                         I40E_INSET_FLEX_PAYLOAD,
9180                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9181                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9182                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9183                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9184                         I40E_INSET_FLEX_PAYLOAD,
9185         };
9186
9187         /**
9188          * Flow director supports only fields defined in
9189          * union rte_eth_fdir_flow.
9190          */
9191         static const uint64_t valid_fdir_inset_table[] = {
9192                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9193                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9194                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9195                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9196                 I40E_INSET_IPV4_TTL,
9197                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9198                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9199                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9200                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9201                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9202                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9203                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9204                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9205                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9206                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9207                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9208                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9209                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9210                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9211                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9212                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9213                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9214                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9215                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9216                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9217                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9218                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9219                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9220                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9221                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9222                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9223                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9224                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9225                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9226                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9227                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9228                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9229                 I40E_INSET_SCTP_VT,
9230                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9231                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9232                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9233                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9234                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9235                 I40E_INSET_IPV4_TTL,
9236                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9237                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9238                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9239                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9240                 I40E_INSET_IPV6_HOP_LIMIT,
9241                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9242                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9243                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9244                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9245                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9246                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9247                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9248                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9249                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9250                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9251                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9252                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9253                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9254                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9255                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9256                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9257                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9258                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9259                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9260                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9261                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9262                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9263                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9264                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9265                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9266                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9267                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9268                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9269                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9270                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9271                 I40E_INSET_SCTP_VT,
9272                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9273                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9274                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9275                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9276                 I40E_INSET_IPV6_HOP_LIMIT,
9277                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9278                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9279                 I40E_INSET_LAST_ETHER_TYPE,
9280         };
9281
9282         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9283                 return 0;
9284         if (filter == RTE_ETH_FILTER_HASH)
9285                 valid = valid_hash_inset_table[pctype];
9286         else
9287                 valid = valid_fdir_inset_table[pctype];
9288
9289         return valid;
9290 }
9291
9292 /**
9293  * Validate if the input set is allowed for a specific PCTYPE
9294  */
9295 int
9296 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9297                 enum rte_filter_type filter, uint64_t inset)
9298 {
9299         uint64_t valid;
9300
9301         valid = i40e_get_valid_input_set(pctype, filter);
9302         if (inset & (~valid))
9303                 return -EINVAL;
9304
9305         return 0;
9306 }
9307
/* default input set fields combination per pctype */
uint64_t
i40e_get_default_input_set(uint16_t pctype)
{
        /* Hardware-defined defaults: IP src/dst for all IP pctypes,
         * plus L4 ports where applicable and the SCTP verification
         * tag for SCTP; entries not listed default to 0.
         */
        static const uint64_t default_inset_table[] = {
                [I40E_FILTER_PCTYPE_FRAG_IPV4] =
                        I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
                [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
                        I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
                        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
                [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
                        I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
                        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
                [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
                        I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
                        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
                [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
                        I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
                        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
                [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
                        I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
                        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
                [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
                        I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
                        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
                        I40E_INSET_SCTP_VT,
                [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
                        I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
                [I40E_FILTER_PCTYPE_FRAG_IPV6] =
                        I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
                [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
                        I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
                        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
                [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
                        I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
                        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
                [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
                        I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
                        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
                [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
                        I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
                        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
                [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
                        I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
                        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
                [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
                        I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
                        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
                        I40E_INSET_SCTP_VT,
                [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
                        I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
                [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
                        I40E_INSET_LAST_ETHER_TYPE,
        };

        /* Out-of-range pctypes have no default input set. */
        if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
                return 0;

        return default_inset_table[pctype];
}
9368
9369 /**
9370  * Translate the input set from bit masks to register aware bit masks
9371  * and vice versa
9372  */
9373 uint64_t
9374 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9375 {
9376         uint64_t val = 0;
9377         uint16_t i;
9378
9379         struct inset_map {
9380                 uint64_t inset;
9381                 uint64_t inset_reg;
9382         };
9383
9384         static const struct inset_map inset_map_common[] = {
9385                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9386                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9387                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9388                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9389                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9390                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9391                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9392                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9393                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9394                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9395                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9396                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9397                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9398                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9399                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9400                 {I40E_INSET_TUNNEL_DMAC,
9401                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9402                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9403                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9404                 {I40E_INSET_TUNNEL_SRC_PORT,
9405                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9406                 {I40E_INSET_TUNNEL_DST_PORT,
9407                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9408                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9409                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9410                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9411                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9412                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9413                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9414                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9415                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9416                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9417         };
9418
9419     /* some different registers map in x722*/
9420         static const struct inset_map inset_map_diff_x722[] = {
9421                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9422                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9423                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9424                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9425         };
9426
9427         static const struct inset_map inset_map_diff_not_x722[] = {
9428                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9429                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9430                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9431                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9432         };
9433
9434         if (input == 0)
9435                 return val;
9436
9437         /* Translate input set to register aware inset */
9438         if (type == I40E_MAC_X722) {
9439                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9440                         if (input & inset_map_diff_x722[i].inset)
9441                                 val |= inset_map_diff_x722[i].inset_reg;
9442                 }
9443         } else {
9444                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9445                         if (input & inset_map_diff_not_x722[i].inset)
9446                                 val |= inset_map_diff_not_x722[i].inset_reg;
9447                 }
9448         }
9449
9450         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9451                 if (input & inset_map_common[i].inset)
9452                         val |= inset_map_common[i].inset_reg;
9453         }
9454
9455         return val;
9456 }
9457
9458 static int
9459 i40e_get_inset_field_offset(struct i40e_hw *hw, uint32_t pit_reg_start,
9460                             uint32_t pit_reg_count, uint32_t hdr_off)
9461 {
9462         const uint32_t pit_reg_end = pit_reg_start + pit_reg_count;
9463         uint32_t field_off = I40E_FDIR_FIELD_OFFSET(hdr_off);
9464         uint32_t i, reg_val, src_off, count;
9465
9466         for (i = pit_reg_start; i < pit_reg_end; i++) {
9467                 reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_PIT(i));
9468
9469                 src_off = I40E_GLQF_PIT_SOURCE_OFF_GET(reg_val);
9470                 count = I40E_GLQF_PIT_FSIZE_GET(reg_val);
9471
9472                 if (src_off <= field_off && (src_off + count) > field_off)
9473                         break;
9474         }
9475
9476         if (i >= pit_reg_end) {
9477                 PMD_DRV_LOG(ERR,
9478                             "Hardware GLQF_PIT configuration does not support this field mask");
9479                 return -1;
9480         }
9481
9482         return I40E_GLQF_PIT_DEST_OFF_GET(reg_val) + field_off - src_off;
9483 }
9484
9485 int
9486 i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset,
9487                              uint32_t *mask, uint8_t nb_elem)
9488 {
9489         static const uint64_t mask_inset[] = {
9490                 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL,
9491                 I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT };
9492
9493         static const struct {
9494                 uint64_t inset;
9495                 uint32_t mask;
9496                 uint32_t offset;
9497         } inset_mask_offset_map[] = {
9498                 { I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK,
9499                   offsetof(struct rte_ipv4_hdr, type_of_service) },
9500
9501                 { I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK,
9502                   offsetof(struct rte_ipv4_hdr, next_proto_id) },
9503
9504                 { I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK,
9505                   offsetof(struct rte_ipv4_hdr, time_to_live) },
9506
9507                 { I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK,
9508                   offsetof(struct rte_ipv6_hdr, vtc_flow) },
9509
9510                 { I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK,
9511                   offsetof(struct rte_ipv6_hdr, proto) },
9512
9513                 { I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK,
9514                   offsetof(struct rte_ipv6_hdr, hop_limits) },
9515         };
9516
9517         uint32_t i;
9518         int idx = 0;
9519
9520         assert(mask);
9521         if (!inset)
9522                 return 0;
9523
9524         for (i = 0; i < RTE_DIM(mask_inset); i++) {
9525                 /* Clear the inset bit, if no MASK is required,
9526                  * for example proto + ttl
9527                  */
9528                 if ((mask_inset[i] & inset) == mask_inset[i]) {
9529                         inset &= ~mask_inset[i];
9530                         if (!inset)
9531                                 return 0;
9532                 }
9533         }
9534
9535         for (i = 0; i < RTE_DIM(inset_mask_offset_map); i++) {
9536                 uint32_t pit_start, pit_count;
9537                 int offset;
9538
9539                 if (!(inset_mask_offset_map[i].inset & inset))
9540                         continue;
9541
9542                 if (inset_mask_offset_map[i].inset &
9543                     (I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9544                      I40E_INSET_IPV4_TTL)) {
9545                         pit_start = I40E_GLQF_PIT_IPV4_START;
9546                         pit_count = I40E_GLQF_PIT_IPV4_COUNT;
9547                 } else {
9548                         pit_start = I40E_GLQF_PIT_IPV6_START;
9549                         pit_count = I40E_GLQF_PIT_IPV6_COUNT;
9550                 }
9551
9552                 offset = i40e_get_inset_field_offset(hw, pit_start, pit_count,
9553                                 inset_mask_offset_map[i].offset);
9554
9555                 if (offset < 0)
9556                         return -EINVAL;
9557
9558                 if (idx >= nb_elem) {
9559                         PMD_DRV_LOG(ERR,
9560                                     "Configuration of inset mask out of range %u",
9561                                     nb_elem);
9562                         return -ERANGE;
9563                 }
9564
9565                 mask[idx] = I40E_GLQF_PIT_BUILD((uint32_t)offset,
9566                                                 inset_mask_offset_map[i].mask);
9567                 idx++;
9568         }
9569
9570         return idx;
9571 }
9572
9573 void
9574 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9575 {
9576         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9577
9578         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9579         if (reg != val)
9580                 i40e_write_rx_ctl(hw, addr, val);
9581         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9582                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9583 }
9584
9585 void
9586 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9587 {
9588         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9589         struct rte_eth_dev_data *dev_data =
9590                 ((struct i40e_adapter *)hw->back)->pf.dev_data;
9591         struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
9592
9593         if (reg != val) {
9594                 i40e_write_rx_ctl(hw, addr, val);
9595                 PMD_DRV_LOG(WARNING,
9596                             "i40e device %s changed global register [0x%08x]."
9597                             " original: 0x%08x, new: 0x%08x",
9598                             dev->device->name, addr, reg,
9599                             (uint32_t)i40e_read_rx_ctl(hw, addr));
9600         }
9601 }
9602
/*
 * Program the default flow-director and RSS hash input sets for every
 * supported packet classification type (pctype).
 *
 * Per-port FD inset registers are always written; the device-global hash
 * inset and FD/hash mask registers are touched only when the driver is
 * allowed to modify global configuration (not in multi-driver mode).
 * The defaults are mirrored into pf->hash_input_set / pf->fdir.input_set.
 */
static void
i40e_filter_input_set_init(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	enum i40e_filter_pctype pctype;
	uint64_t input_set, inset_reg;
	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
	int num, i;
	uint16_t flow_type;

	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
		flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);

		/* Skip pctypes with no corresponding flow type */
		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
			continue;

		input_set = i40e_get_default_input_set(pctype);

		/* Build the mask register values the input set requires */
		num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
						   I40E_INSET_MASK_NUM_REG);
		if (num < 0)
			return;
		/* Mask registers are global; refuse to touch them when
		 * other drivers may share the device.
		 */
		if (pf->support_multi_driver && num > 0) {
			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
			return;
		}
		inset_reg = i40e_translate_input_set_reg(hw->mac.type,
					input_set);

		/* Per-port flow-director inset (low then high 32 bits) */
		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
				      (uint32_t)(inset_reg & UINT32_MAX));
		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
				     (uint32_t)((inset_reg >>
				     I40E_32_BIT_WIDTH) & UINT32_MAX));
		if (!pf->support_multi_driver) {
			/* Global hash inset (low then high 32 bits) */
			i40e_check_write_global_reg(hw,
					    I40E_GLQF_HASH_INSET(0, pctype),
					    (uint32_t)(inset_reg & UINT32_MAX));
			i40e_check_write_global_reg(hw,
					     I40E_GLQF_HASH_INSET(1, pctype),
					     (uint32_t)((inset_reg >>
					      I40E_32_BIT_WIDTH) & UINT32_MAX));

			/* Program the required FD and hash mask registers */
			for (i = 0; i < num; i++) {
				i40e_check_write_global_reg(hw,
						    I40E_GLQF_FD_MSK(i, pctype),
						    mask_reg[i]);
				i40e_check_write_global_reg(hw,
						  I40E_GLQF_HASH_MSK(i, pctype),
						  mask_reg[i]);
			}
			/*clear unused mask registers of the pctype */
			for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
				i40e_check_write_global_reg(hw,
						    I40E_GLQF_FD_MSK(i, pctype),
						    0);
				i40e_check_write_global_reg(hw,
						  I40E_GLQF_HASH_MSK(i, pctype),
						  0);
			}
		} else {
			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
		}
		I40E_WRITE_FLUSH(hw);

		/* store the default input set */
		if (!pf->support_multi_driver)
			pf->hash_input_set[pctype] = input_set;
		pf->fdir.input_set[pctype] = input_set;
	}
}
9675
/*
 * Configure the RSS hash input set for a packet classification type.
 *
 * @param hw        hardware handle; hw->back points to the adapter
 * @param input_set input set bits (I40E_INSET_*) to program
 * @param pctype    packet classification type to configure
 * @param add       when true, OR the new bits into the currently
 *                  programmed set; when false, replace it entirely
 *
 * @return 0 on success, -EPERM when multi-driver mode forbids touching
 *         global registers, -EINVAL when the mask registers cannot be
 *         generated.
 */
int
i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
		    uint32_t pctype, bool add)
{
	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
	uint64_t inset_reg = 0;
	int num, i;

	/* Hash inset registers are global; never modify them when other
	 * drivers may share the device.
	 */
	if (pf->support_multi_driver) {
		PMD_DRV_LOG(ERR,
			    "Modify input set is not permitted when multi-driver enabled.");
		return -EPERM;
	}

	/* For X722, get translated pctype in fd pctype register */
	if (hw->mac.type == I40E_MAC_X722)
		pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));

	if (add) {
		/* get inset value in register */
		inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
		inset_reg <<= I40E_32_BIT_WIDTH;
		inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
		/* Merge with the set cached from previous configuration */
		input_set |= pf->hash_input_set[pctype];
	}
	num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
					   I40E_INSET_MASK_NUM_REG);
	if (num < 0)
		return -EINVAL;

	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);

	/* Program the 64-bit inset as two 32-bit global registers */
	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
				    (uint32_t)(inset_reg & UINT32_MAX));
	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
				    (uint32_t)((inset_reg >>
				    I40E_32_BIT_WIDTH) & UINT32_MAX));

	/* Program the required hash mask registers */
	for (i = 0; i < num; i++)
		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
					    mask_reg[i]);
	/*clear unused mask registers of the pctype */
	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
					    0);
	I40E_WRITE_FLUSH(hw);

	/* Cache the programmed set for later incremental updates */
	pf->hash_input_set[pctype] = input_set;
	return 0;
}
9727
9728 /* Convert ethertype filter structure */
9729 static int
9730 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9731                               struct i40e_ethertype_filter *filter)
9732 {
9733         rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
9734                 RTE_ETHER_ADDR_LEN);
9735         filter->input.ether_type = input->ether_type;
9736         filter->flags = input->flags;
9737         filter->queue = input->queue;
9738
9739         return 0;
9740 }
9741
9742 /* Check if there exists the ehtertype filter */
9743 struct i40e_ethertype_filter *
9744 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9745                                 const struct i40e_ethertype_filter_input *input)
9746 {
9747         int ret;
9748
9749         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9750         if (ret < 0)
9751                 return NULL;
9752
9753         return ethertype_rule->hash_map[ret];
9754 }
9755
9756 /* Add ethertype filter in SW list */
9757 static int
9758 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9759                                 struct i40e_ethertype_filter *filter)
9760 {
9761         struct i40e_ethertype_rule *rule = &pf->ethertype;
9762         int ret;
9763
9764         ret = rte_hash_add_key(rule->hash_table, &filter->input);
9765         if (ret < 0) {
9766                 PMD_DRV_LOG(ERR,
9767                             "Failed to insert ethertype filter"
9768                             " to hash table %d!",
9769                             ret);
9770                 return ret;
9771         }
9772         rule->hash_map[ret] = filter;
9773
9774         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9775
9776         return 0;
9777 }
9778
9779 /* Delete ethertype filter in SW list */
9780 int
9781 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9782                              struct i40e_ethertype_filter_input *input)
9783 {
9784         struct i40e_ethertype_rule *rule = &pf->ethertype;
9785         struct i40e_ethertype_filter *filter;
9786         int ret;
9787
9788         ret = rte_hash_del_key(rule->hash_table, input);
9789         if (ret < 0) {
9790                 PMD_DRV_LOG(ERR,
9791                             "Failed to delete ethertype filter"
9792                             " to hash table %d!",
9793                             ret);
9794                 return ret;
9795         }
9796         filter = rule->hash_map[ret];
9797         rule->hash_map[ret] = NULL;
9798
9799         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9800         rte_free(filter);
9801
9802         return 0;
9803 }
9804
/*
 * Configure an ethertype filter, which can direct packets by filtering
 * with mac address and ether_type or only ether_type.
 *
 * @param pf     physical function instance
 * @param filter filter spec: MAC address, ether_type, flags, dest queue
 * @param add    true to add the filter, false to delete it
 *
 * @return 0 on success; -EINVAL on invalid input or rule conflict;
 *         -ENOSYS when the admin-queue command fails; -ENOMEM on
 *         allocation failure.
 */
int
i40e_ethertype_filter_set(struct i40e_pf *pf,
			struct rte_eth_ethertype_filter *filter,
			bool add)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	struct i40e_ethertype_filter *ethertype_filter, *node;
	struct i40e_ethertype_filter check_filter;
	struct i40e_control_filter_stats stats;
	uint16_t flags = 0;
	int ret;

	/* Destination queue must exist */
	if (filter->queue >= pf->dev_data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue ID");
		return -EINVAL;
	}
	/* IPv4/IPv6 ether types are rejected by this control filter */
	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		PMD_DRV_LOG(ERR,
			"unsupported ether_type(0x%04x) in control packet filter.",
			filter->ether_type);
		return -EINVAL;
	}
	if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
		PMD_DRV_LOG(WARNING,
			"filter vlan ether_type in first tag is not supported.");

	/* Check if there is the filter in SW list */
	memset(&check_filter, 0, sizeof(check_filter));
	i40e_ethertype_filter_convert(filter, &check_filter);
	node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
					       &check_filter.input);
	/* Adding a duplicate rule is an error */
	if (add && node) {
		PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
		return -EINVAL;
	}

	/* Deleting a non-existent rule is an error */
	if (!add && !node) {
		PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
		return -EINVAL;
	}

	/* Build the admin-queue flags from the requested filter flags */
	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;

	/* Program (or remove) the filter in hardware via the admin queue */
	memset(&stats, 0, sizeof(stats));
	ret = i40e_aq_add_rem_control_packet_filter(hw,
			filter->mac_addr.addr_bytes,
			filter->ether_type, flags,
			pf->main_vsi->seid,
			filter->queue, add, &stats, NULL);

	PMD_DRV_LOG(INFO,
		"add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
		ret, stats.mac_etype_used, stats.etype_used,
		stats.mac_etype_free, stats.etype_free);
	if (ret < 0)
		return -ENOSYS;

	/* Add or delete a filter in SW list */
	if (add) {
		ethertype_filter = rte_zmalloc("ethertype_filter",
				       sizeof(*ethertype_filter), 0);
		if (ethertype_filter == NULL) {
			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
			return -ENOMEM;
		}

		rte_memcpy(ethertype_filter, &check_filter,
			   sizeof(check_filter));
		ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
		/* SW insert failed: free the copy; HW still has the filter */
		if (ret < 0)
			rte_free(ethertype_filter);
	} else {
		ret = i40e_sw_ethertype_filter_del(pf, &node->input);
	}

	return ret;
}
9892
9893 static int
9894 i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
9895                       const struct rte_flow_ops **ops)
9896 {
9897         if (dev == NULL)
9898                 return -EINVAL;
9899
9900         *ops = &i40e_flow_ops;
9901         return 0;
9902 }
9903
9904 /*
9905  * Check and enable Extended Tag.
9906  * Enabling Extended Tag is important for 40G performance.
9907  */
9908 static void
9909 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9910 {
9911         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9912         uint32_t buf = 0;
9913         int ret;
9914
9915         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9916                                       PCI_DEV_CAP_REG);
9917         if (ret < 0) {
9918                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9919                             PCI_DEV_CAP_REG);
9920                 return;
9921         }
9922         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9923                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9924                 return;
9925         }
9926
9927         buf = 0;
9928         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9929                                       PCI_DEV_CTRL_REG);
9930         if (ret < 0) {
9931                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9932                             PCI_DEV_CTRL_REG);
9933                 return;
9934         }
9935         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9936                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9937                 return;
9938         }
9939         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9940         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9941                                        PCI_DEV_CTRL_REG);
9942         if (ret < 0) {
9943                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9944                             PCI_DEV_CTRL_REG);
9945                 return;
9946         }
9947 }
9948
/*
 * As some registers wouldn't be reset unless a global hardware reset,
 * hardware initialization is needed to put those registers into an
 * expected initial state.
 */
static void
i40e_hw_init(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Enable PCIe Extended Tag if supported (helps 40G performance) */
	i40e_enable_extended_tag(dev);

	/* clear the PF Queue Filter control register */
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);

	/* Disable symmetric hash per port */
	i40e_set_symmetric_hash_enable_per_port(hw, 0);
}
9967
9968 /*
9969  * For X722 it is possible to have multiple pctypes mapped to the same flowtype
9970  * however this function will return only one highest pctype index,
9971  * which is not quite correct. This is known problem of i40e driver
9972  * and needs to be fixed later.
9973  */
9974 enum i40e_filter_pctype
9975 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9976 {
9977         int i;
9978         uint64_t pctype_mask;
9979
9980         if (flow_type < I40E_FLOW_TYPE_MAX) {
9981                 pctype_mask = adapter->pctypes_tbl[flow_type];
9982                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9983                         if (pctype_mask & (1ULL << i))
9984                                 return (enum i40e_filter_pctype)i;
9985                 }
9986         }
9987         return I40E_FILTER_PCTYPE_INVALID;
9988 }
9989
9990 uint16_t
9991 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9992                         enum i40e_filter_pctype pctype)
9993 {
9994         uint16_t flowtype;
9995         uint64_t pctype_mask = 1ULL << pctype;
9996
9997         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9998              flowtype++) {
9999                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
10000                         return flowtype;
10001         }
10002
10003         return RTE_ETH_FLOW_UNKNOWN;
10004 }
10005
10006 /*
10007  * On X710, performance number is far from the expectation on recent firmware
10008  * versions; on XL710, performance number is also far from the expectation on
10009  * recent firmware versions, if promiscuous mode is disabled, or promiscuous
10010  * mode is enabled and port MAC address is equal to the packet destination MAC
10011  * address. The fix for this issue may not be integrated in the following
10012  * firmware version. So the workaround in software driver is needed. It needs
10013  * to modify the initial values of 3 internal only registers for both X710 and
10014  * XL710. Note that the values for X710 or XL710 could be different, and the
10015  * workaround can be removed when it is fixed in firmware in the future.
10016  */
10017
10018 /* For both X710 and XL710 */
10019 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
10020 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
10021 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
10022
10023 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10024 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
10025
10026 /* For X722 */
10027 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10028 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10029
10030 /* For X710 */
10031 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
10032 /* For XL710 */
10033 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
10034 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10035
10036 /*
10037  * GL_SWR_PM_UP_THR:
10038  * The value is not impacted from the link speed, its value is set according
10039  * to the total number of ports for a better pipe-monitor configuration.
10040  */
10041 static bool
10042 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10043 {
10044 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10045                 .device_id = (dev),   \
10046                 .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10047
10048 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10049                 .device_id = (dev),   \
10050                 .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10051
10052         static const struct {
10053                 uint16_t device_id;
10054                 uint32_t val;
10055         } swr_pm_table[] = {
10056                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10057                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10058                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10059                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10060                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
10061
10062                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10063                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10064                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10065                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10066                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10067                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10068                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10069         };
10070         uint32_t i;
10071
10072         if (value == NULL) {
10073                 PMD_DRV_LOG(ERR, "value is NULL");
10074                 return false;
10075         }
10076
10077         for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10078                 if (hw->device_id == swr_pm_table[i].device_id) {
10079                         *value = swr_pm_table[i].val;
10080
10081                         PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10082                                     "value - 0x%08x",
10083                                     hw->device_id, *value);
10084                         return true;
10085                 }
10086         }
10087
10088         return false;
10089 }
10090
10091 static int
10092 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10093 {
10094         enum i40e_status_code status;
10095         struct i40e_aq_get_phy_abilities_resp phy_ab;
10096         int ret = -ENOTSUP;
10097         int retries = 0;
10098
10099         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10100                                               NULL);
10101
10102         while (status) {
10103                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10104                         status);
10105                 retries++;
10106                 rte_delay_us(100000);
10107                 if  (retries < 5)
10108                         status = i40e_aq_get_phy_capabilities(hw, false,
10109                                         true, &phy_ab, NULL);
10110                 else
10111                         return ret;
10112         }
10113         return 0;
10114 }
10115
/* Fix up a small set of global switch registers whose required values
 * depend on the MAC type (X722 vs X710/XL710/XXV710) and firmware
 * version. Each register is read first and only rewritten when it
 * differs from the wanted value, all via AQ debug read/write commands.
 */
static void
i40e_configure_registers(struct i40e_hw *hw)
{
	/* Table of register fixups; the 'val' fields are filled in at
	 * run time below because the correct values are MAC/FW specific.
	 */
	static struct {
		uint32_t addr;
		uint64_t val;
	} reg_table[] = {
		{I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
		{I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
		{I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
	};
	uint64_t reg;
	uint32_t i;
	int ret;

	for (i = 0; i < RTE_DIM(reg_table); i++) {
		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
				reg_table[i].val =
					I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
			else /* For X710/XL710/XXV710 */
				/* Value changed starting with FW major 6. */
				if (hw->aq.fw_maj_ver < 6)
					reg_table[i].val =
					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
				else
					reg_table[i].val =
					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
		}

		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
				reg_table[i].val =
					I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
			else /* For X710/XL710/XXV710 */
				reg_table[i].val =
					I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
		}

		if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
			uint32_t cfg_val;

			/* Only specific device ids need this fixup; skip
			 * the register entirely for everything else.
			 */
			if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
				PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
					    "GL_SWR_PM_UP_THR value fixup",
					    hw->device_id);
				continue;
			}

			reg_table[i].val = cfg_val;
		}

		ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
							&reg, NULL);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
							reg_table[i].addr);
			break;
		}
		PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
						reg_table[i].addr, reg);
		/* Avoid a write when the register already holds the value. */
		if (reg == reg_table[i].val)
			continue;

		ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
						reg_table[i].val, NULL);
		if (ret < 0) {
			PMD_DRV_LOG(ERR,
				"Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
				reg_table[i].val, reg_table[i].addr);
			break;
		}
		PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
			"0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
	}
}
10191
10192 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10193 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10194 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10195 static int
10196 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10197 {
10198         uint32_t reg;
10199         int ret;
10200
10201         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10202                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10203                 return -EINVAL;
10204         }
10205
10206         /* Configure for double VLAN RX stripping */
10207         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10208         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10209                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10210                 ret = i40e_aq_debug_write_register(hw,
10211                                                    I40E_VSI_TSR(vsi->vsi_id),
10212                                                    reg, NULL);
10213                 if (ret < 0) {
10214                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10215                                     vsi->vsi_id);
10216                         return I40E_ERR_CONFIG;
10217                 }
10218         }
10219
10220         /* Configure for double VLAN TX insertion */
10221         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10222         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10223                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10224                 ret = i40e_aq_debug_write_register(hw,
10225                                                    I40E_VSI_L2TAGSTXVALID(
10226                                                    vsi->vsi_id), reg, NULL);
10227                 if (ret < 0) {
10228                         PMD_DRV_LOG(ERR,
10229                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
10230                                 vsi->vsi_id);
10231                         return I40E_ERR_CONFIG;
10232                 }
10233         }
10234
10235         return 0;
10236 }
10237
/**
 * i40e_aq_add_mirror_rule
 * @hw: pointer to the hardware structure
 * @seid: VEB seid to add mirror rule to
 * @dst_id: destination vsi seid
 * @rule_type: type of the mirror rule (VLAN / vport / port mirroring)
 * @entries: Buffer which contains the entities to be mirrored
 * @count: number of entities contained in the buffer
 * @rule_id:the rule_id of the rule to be added
 *
 * Add a mirror rule for a given veb.
 *
 **/
static enum i40e_status_code
i40e_aq_add_mirror_rule(struct i40e_hw *hw,
			uint16_t seid, uint16_t dst_id,
			uint16_t rule_type, uint16_t *entries,
			uint16_t count, uint16_t *rule_id)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_delete_mirror_rule cmd;
	/* The completion data is returned in-place in the descriptor. */
	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
		(struct i40e_aqc_add_delete_mirror_rule_completion *)
		&desc.params.raw;
	uint16_t buff_len;
	enum i40e_status_code status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_mirror_rule);
	memset(&cmd, 0, sizeof(cmd));

	/* The entry list is sent as an indirect buffer; flag the
	 * descriptor accordingly when there is anything to send.
	 */
	buff_len = sizeof(uint16_t) * count;
	desc.datalen = rte_cpu_to_le_16(buff_len);
	if (buff_len > 0)
		desc.flags |= rte_cpu_to_le_16(
			(uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	/* All 16-bit command fields are little endian on the wire. */
	cmd.rule_type = rte_cpu_to_le_16(rule_type <<
				I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
	cmd.num_entries = rte_cpu_to_le_16(count);
	cmd.seid = rte_cpu_to_le_16(seid);
	cmd.destination = rte_cpu_to_le_16(dst_id);

	rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
	status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
	PMD_DRV_LOG(INFO,
		"i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
		hw->aq.asq_last_status, resp->rule_id,
		resp->mirror_rules_used, resp->mirror_rules_free);
	/* Report the firmware-assigned rule id back to the caller. */
	*rule_id = rte_le_to_cpu_16(resp->rule_id);

	return status;
}
10289
10290 /**
10291  * i40e_aq_del_mirror_rule
10292  * @hw: pointer to the hardware structure
10293  * @seid: VEB seid to add mirror rule to
10294  * @entries: Buffer which contains the entities to be mirrored
10295  * @count: number of entities contained in the buffer
10296  * @rule_id:the rule_id of the rule to be delete
10297  *
10298  * Delete a mirror rule for a given veb.
10299  *
10300  **/
10301 static enum i40e_status_code
10302 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10303                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
10304                 uint16_t count, uint16_t rule_id)
10305 {
10306         struct i40e_aq_desc desc;
10307         struct i40e_aqc_add_delete_mirror_rule cmd;
10308         uint16_t buff_len = 0;
10309         enum i40e_status_code status;
10310         void *buff = NULL;
10311
10312         i40e_fill_default_direct_cmd_desc(&desc,
10313                                           i40e_aqc_opc_delete_mirror_rule);
10314         memset(&cmd, 0, sizeof(cmd));
10315         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10316                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10317                                                           I40E_AQ_FLAG_RD));
10318                 cmd.num_entries = count;
10319                 buff_len = sizeof(uint16_t) * count;
10320                 desc.datalen = rte_cpu_to_le_16(buff_len);
10321                 buff = (void *)entries;
10322         } else
10323                 /* rule id is filled in destination field for deleting mirror rule */
10324                 cmd.destination = rte_cpu_to_le_16(rule_id);
10325
10326         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10327                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10328         cmd.seid = rte_cpu_to_le_16(seid);
10329
10330         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10331         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10332
10333         return status;
10334 }
10335
10336 /**
10337  * i40e_mirror_rule_set
10338  * @dev: pointer to the hardware structure
10339  * @mirror_conf: mirror rule info
10340  * @sw_id: mirror rule's sw_id
10341  * @on: enable/disable
10342  *
10343  * set a mirror rule.
10344  *
10345  **/
10346 static int
10347 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10348                         struct rte_eth_mirror_conf *mirror_conf,
10349                         uint8_t sw_id, uint8_t on)
10350 {
10351         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10352         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10353         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10354         struct i40e_mirror_rule *parent = NULL;
10355         uint16_t seid, dst_seid, rule_id;
10356         uint16_t i, j = 0;
10357         int ret;
10358
10359         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10360
10361         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10362                 PMD_DRV_LOG(ERR,
10363                         "mirror rule can not be configured without veb or vfs.");
10364                 return -ENOSYS;
10365         }
10366         if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
10367                 PMD_DRV_LOG(ERR, "mirror table is full.");
10368                 return -ENOSPC;
10369         }
10370         if (mirror_conf->dst_pool > pf->vf_num) {
10371                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10372                                  mirror_conf->dst_pool);
10373                 return -EINVAL;
10374         }
10375
10376         seid = pf->main_vsi->veb->seid;
10377
10378         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10379                 if (sw_id <= it->index) {
10380                         mirr_rule = it;
10381                         break;
10382                 }
10383                 parent = it;
10384         }
10385         if (mirr_rule && sw_id == mirr_rule->index) {
10386                 if (on) {
10387                         PMD_DRV_LOG(ERR, "mirror rule exists.");
10388                         return -EEXIST;
10389                 } else {
10390                         ret = i40e_aq_del_mirror_rule(hw, seid,
10391                                         mirr_rule->rule_type,
10392                                         mirr_rule->entries,
10393                                         mirr_rule->num_entries, mirr_rule->id);
10394                         if (ret < 0) {
10395                                 PMD_DRV_LOG(ERR,
10396                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
10397                                         ret, hw->aq.asq_last_status);
10398                                 return -ENOSYS;
10399                         }
10400                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10401                         rte_free(mirr_rule);
10402                         pf->nb_mirror_rule--;
10403                         return 0;
10404                 }
10405         } else if (!on) {
10406                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10407                 return -ENOENT;
10408         }
10409
10410         mirr_rule = rte_zmalloc("i40e_mirror_rule",
10411                                 sizeof(struct i40e_mirror_rule) , 0);
10412         if (!mirr_rule) {
10413                 PMD_DRV_LOG(ERR, "failed to allocate memory");
10414                 return I40E_ERR_NO_MEMORY;
10415         }
10416         switch (mirror_conf->rule_type) {
10417         case ETH_MIRROR_VLAN:
10418                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10419                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10420                                 mirr_rule->entries[j] =
10421                                         mirror_conf->vlan.vlan_id[i];
10422                                 j++;
10423                         }
10424                 }
10425                 if (j == 0) {
10426                         PMD_DRV_LOG(ERR, "vlan is not specified.");
10427                         rte_free(mirr_rule);
10428                         return -EINVAL;
10429                 }
10430                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10431                 break;
10432         case ETH_MIRROR_VIRTUAL_POOL_UP:
10433         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10434                 /* check if the specified pool bit is out of range */
10435                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
10436                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
10437                         rte_free(mirr_rule);
10438                         return -EINVAL;
10439                 }
10440                 for (i = 0, j = 0; i < pf->vf_num; i++) {
10441                         if (mirror_conf->pool_mask & (1ULL << i)) {
10442                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10443                                 j++;
10444                         }
10445                 }
10446                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10447                         /* add pf vsi to entries */
10448                         mirr_rule->entries[j] = pf->main_vsi_seid;
10449                         j++;
10450                 }
10451                 if (j == 0) {
10452                         PMD_DRV_LOG(ERR, "pool is not specified.");
10453                         rte_free(mirr_rule);
10454                         return -EINVAL;
10455                 }
10456                 /* egress and ingress in aq commands means from switch but not port */
10457                 mirr_rule->rule_type =
10458                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10459                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10460                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10461                 break;
10462         case ETH_MIRROR_UPLINK_PORT:
10463                 /* egress and ingress in aq commands means from switch but not port*/
10464                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10465                 break;
10466         case ETH_MIRROR_DOWNLINK_PORT:
10467                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10468                 break;
10469         default:
10470                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10471                         mirror_conf->rule_type);
10472                 rte_free(mirr_rule);
10473                 return -EINVAL;
10474         }
10475
10476         /* If the dst_pool is equal to vf_num, consider it as PF */
10477         if (mirror_conf->dst_pool == pf->vf_num)
10478                 dst_seid = pf->main_vsi_seid;
10479         else
10480                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10481
10482         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10483                                       mirr_rule->rule_type, mirr_rule->entries,
10484                                       j, &rule_id);
10485         if (ret < 0) {
10486                 PMD_DRV_LOG(ERR,
10487                         "failed to add mirror rule: ret = %d, aq_err = %d.",
10488                         ret, hw->aq.asq_last_status);
10489                 rte_free(mirr_rule);
10490                 return -ENOSYS;
10491         }
10492
10493         mirr_rule->index = sw_id;
10494         mirr_rule->num_entries = j;
10495         mirr_rule->id = rule_id;
10496         mirr_rule->dst_vsi_seid = dst_seid;
10497
10498         if (parent)
10499                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10500         else
10501                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10502
10503         pf->nb_mirror_rule++;
10504         return 0;
10505 }
10506
10507 /**
10508  * i40e_mirror_rule_reset
10509  * @dev: pointer to the device
10510  * @sw_id: mirror rule's sw_id
10511  *
10512  * reset a mirror rule.
10513  *
10514  **/
10515 static int
10516 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10517 {
10518         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10519         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10520         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10521         uint16_t seid;
10522         int ret;
10523
10524         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10525
10526         seid = pf->main_vsi->veb->seid;
10527
10528         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10529                 if (sw_id == it->index) {
10530                         mirr_rule = it;
10531                         break;
10532                 }
10533         }
10534         if (mirr_rule) {
10535                 ret = i40e_aq_del_mirror_rule(hw, seid,
10536                                 mirr_rule->rule_type,
10537                                 mirr_rule->entries,
10538                                 mirr_rule->num_entries, mirr_rule->id);
10539                 if (ret < 0) {
10540                         PMD_DRV_LOG(ERR,
10541                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
10542                                 ret, hw->aq.asq_last_status);
10543                         return -ENOSYS;
10544                 }
10545                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10546                 rte_free(mirr_rule);
10547                 pf->nb_mirror_rule--;
10548         } else {
10549                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10550                 return -ENOENT;
10551         }
10552         return 0;
10553 }
10554
10555 static uint64_t
10556 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10557 {
10558         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10559         uint64_t systim_cycles;
10560
10561         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10562         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10563                         << 32;
10564
10565         return systim_cycles;
10566 }
10567
10568 static uint64_t
10569 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10570 {
10571         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10572         uint64_t rx_tstamp;
10573
10574         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10575         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10576                         << 32;
10577
10578         return rx_tstamp;
10579 }
10580
10581 static uint64_t
10582 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10583 {
10584         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10585         uint64_t tx_tstamp;
10586
10587         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10588         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10589                         << 32;
10590
10591         return tx_tstamp;
10592 }
10593
10594 static void
10595 i40e_start_timecounters(struct rte_eth_dev *dev)
10596 {
10597         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10598         struct i40e_adapter *adapter = dev->data->dev_private;
10599         struct rte_eth_link link;
10600         uint32_t tsync_inc_l;
10601         uint32_t tsync_inc_h;
10602
10603         /* Get current link speed. */
10604         i40e_dev_link_update(dev, 1);
10605         rte_eth_linkstatus_get(dev, &link);
10606
10607         switch (link.link_speed) {
10608         case ETH_SPEED_NUM_40G:
10609         case ETH_SPEED_NUM_25G:
10610                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10611                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10612                 break;
10613         case ETH_SPEED_NUM_10G:
10614                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10615                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10616                 break;
10617         case ETH_SPEED_NUM_1G:
10618                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10619                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10620                 break;
10621         default:
10622                 tsync_inc_l = 0x0;
10623                 tsync_inc_h = 0x0;
10624         }
10625
10626         /* Set the timesync increment value. */
10627         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10628         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10629
10630         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10631         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10632         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10633
10634         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10635         adapter->systime_tc.cc_shift = 0;
10636         adapter->systime_tc.nsec_mask = 0;
10637
10638         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10639         adapter->rx_tstamp_tc.cc_shift = 0;
10640         adapter->rx_tstamp_tc.nsec_mask = 0;
10641
10642         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10643         adapter->tx_tstamp_tc.cc_shift = 0;
10644         adapter->tx_tstamp_tc.nsec_mask = 0;
10645 }
10646
10647 static int
10648 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10649 {
10650         struct i40e_adapter *adapter = dev->data->dev_private;
10651
10652         adapter->systime_tc.nsec += delta;
10653         adapter->rx_tstamp_tc.nsec += delta;
10654         adapter->tx_tstamp_tc.nsec += delta;
10655
10656         return 0;
10657 }
10658
10659 static int
10660 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10661 {
10662         uint64_t ns;
10663         struct i40e_adapter *adapter = dev->data->dev_private;
10664
10665         ns = rte_timespec_to_ns(ts);
10666
10667         /* Set the timecounters to a new value. */
10668         adapter->systime_tc.nsec = ns;
10669         adapter->rx_tstamp_tc.nsec = ns;
10670         adapter->tx_tstamp_tc.nsec = ns;
10671
10672         return 0;
10673 }
10674
10675 static int
10676 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10677 {
10678         uint64_t ns, systime_cycles;
10679         struct i40e_adapter *adapter = dev->data->dev_private;
10680
10681         systime_cycles = i40e_read_systime_cyclecounter(dev);
10682         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10683         *ts = rte_ns_to_timespec(ns);
10684
10685         return 0;
10686 }
10687
10688 static int
10689 i40e_timesync_enable(struct rte_eth_dev *dev)
10690 {
10691         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10692         uint32_t tsync_ctl_l;
10693         uint32_t tsync_ctl_h;
10694
10695         /* Stop the timesync system time. */
10696         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10697         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10698         /* Reset the timesync system time value. */
10699         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10700         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10701
10702         i40e_start_timecounters(dev);
10703
10704         /* Clear timesync registers. */
10705         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10706         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10707         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10708         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10709         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10710         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10711
10712         /* Enable timestamping of PTP packets. */
10713         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10714         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10715
10716         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10717         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10718         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10719
10720         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10721         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10722
10723         return 0;
10724 }
10725
10726 static int
10727 i40e_timesync_disable(struct rte_eth_dev *dev)
10728 {
10729         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10730         uint32_t tsync_ctl_l;
10731         uint32_t tsync_ctl_h;
10732
10733         /* Disable timestamping of transmitted PTP packets. */
10734         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10735         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10736
10737         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10738         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10739
10740         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10741         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10742
10743         /* Reset the timesync increment value. */
10744         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10745         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10746
10747         return 0;
10748 }
10749
10750 static int
10751 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10752                                 struct timespec *timestamp, uint32_t flags)
10753 {
10754         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10755         struct i40e_adapter *adapter = dev->data->dev_private;
10756         uint32_t sync_status;
10757         uint32_t index = flags & 0x03;
10758         uint64_t rx_tstamp_cycles;
10759         uint64_t ns;
10760
10761         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10762         if ((sync_status & (1 << index)) == 0)
10763                 return -EINVAL;
10764
10765         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10766         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10767         *timestamp = rte_ns_to_timespec(ns);
10768
10769         return 0;
10770 }
10771
10772 static int
10773 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10774                                 struct timespec *timestamp)
10775 {
10776         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10777         struct i40e_adapter *adapter = dev->data->dev_private;
10778         uint32_t sync_status;
10779         uint64_t tx_tstamp_cycles;
10780         uint64_t ns;
10781
10782         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10783         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10784                 return -EINVAL;
10785
10786         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10787         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10788         *timestamp = rte_ns_to_timespec(ns);
10789
10790         return 0;
10791 }
10792
10793 /*
10794  * i40e_parse_dcb_configure - parse dcb configure from user
10795  * @dev: the device being configured
10796  * @dcb_cfg: pointer of the result of parse
10797  * @*tc_map: bit map of enabled traffic classes
10798  *
10799  * Returns 0 on success, negative value on failure
10800  */
10801 static int
10802 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10803                          struct i40e_dcbx_config *dcb_cfg,
10804                          uint8_t *tc_map)
10805 {
10806         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10807         uint8_t i, tc_bw, bw_lf;
10808
10809         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10810
10811         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10812         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10813                 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
10814                 return -EINVAL;
10815         }
10816
10817         /* assume each tc has the same bw */
10818         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10819         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10820                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10821         /* to ensure the sum of tcbw is equal to 100 */
10822         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10823         for (i = 0; i < bw_lf; i++)
10824                 dcb_cfg->etscfg.tcbwtable[i]++;
10825
10826         /* assume each tc has the same Transmission Selection Algorithm */
10827         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10828                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10829
10830         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10831                 dcb_cfg->etscfg.prioritytable[i] =
10832                                 dcb_rx_conf->dcb_tc[i];
10833
10834         /* FW needs one App to configure HW */
10835         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10836         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10837         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10838         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10839
10840         if (dcb_rx_conf->nb_tcs == 0)
10841                 *tc_map = 1; /* tc0 only */
10842         else
10843                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10844
10845         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10846                 dcb_cfg->pfc.willing = 0;
10847                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10848                 dcb_cfg->pfc.pfcenable = *tc_map;
10849         }
10850         return 0;
10851 }
10852
10853
/**
 * Rebuild the TC-to-queue mapping section of a VSI properties structure
 * for the given TC bitmap.
 *
 * @vsi: VSI whose queue mapping is being recomputed; vsi->enabled_tc is
 *       updated as a side effect.
 * @info: AQ VSI properties structure to fill (tc_mapping, queue_mapping,
 *        mapping_flags, valid_sections).
 * @enabled_tcmap: bitmap of TCs to enable (bit i => TC i).
 *
 * Returns I40E_SUCCESS, or an i40e error code when the TC map is invalid,
 * the VSI type is unsupported, or there are fewer queues than TCs.
 */
static enum i40e_status_code
i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
			      struct i40e_aqc_vsi_properties_data *info,
			      uint8_t enabled_tcmap)
{
	enum i40e_status_code ret;
	int i, total_tc = 0;
	uint16_t qpnum_per_tc, bsf, qp_idx;
	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
	uint16_t used_queues;

	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
	if (ret != I40E_SUCCESS)
		return ret;

	/* Count enabled TCs; treat an empty map as a single TC (TC0) */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tcmap & (1 << i))
			total_tc++;
	}
	if (total_tc == 0)
		total_tc = 1;
	vsi->enabled_tc = enabled_tcmap;

	/* different VSI has different queues assigned */
	if (vsi->type == I40E_VSI_MAIN)
		/* main VSI owns all Rx queues not reserved for VMDQ pools */
		used_queues = dev_data->nb_rx_queues -
			pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
	else if (vsi->type == I40E_VSI_VMDQ2)
		used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
	else {
		PMD_INIT_LOG(ERR, "unsupported VSI type.");
		return I40E_ERR_NO_AVAILABLE_VSI;
	}

	qpnum_per_tc = used_queues / total_tc;
	/* Number of queues per enabled TC */
	if (qpnum_per_tc == 0) {
		PMD_INIT_LOG(ERR, " number of queues is less that tcs.");
		return I40E_ERR_INVALID_QP_ID;
	}
	/* Round down to a power of two (the HW encodes the count as 2^bsf)
	 * and cap it at the per-TC maximum.
	 */
	qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
				I40E_MAX_Q_PER_TC);
	bsf = rte_bsf32(qpnum_per_tc);

	/**
	 * Configure TC and queue mapping parameters, for enabled TC,
	 * allocate qpnum_per_tc queues to this traffic. For disabled TC,
	 * default queue will serve it.
	 */
	qp_idx = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & (1 << i)) {
			/* Pack queue offset and log2(queue count) into the
			 * per-TC mapping word expected by the AQ.
			 */
			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
			qp_idx += qpnum_per_tc;
		} else
			info->tc_mapping[i] = 0;
	}

	/* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
	/* NOTE(review): I40E_VSI_SRIOV is rejected by the VSI-type check
	 * above, so this branch appears unreachable here — confirm before
	 * relying on it.
	 */
	if (vsi->type == I40E_VSI_SRIOV) {
		info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->nb_qps; i++)
			info->queue_mapping[i] =
				rte_cpu_to_le_16(vsi->base_queue + i);
	} else {
		/* Contiguous mapping: only the base queue is needed */
		info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	}
	info->valid_sections |=
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);

	return I40E_SUCCESS;
}
10932
10933 /*
10934  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10935  * @veb: VEB to be configured
10936  * @tc_map: enabled TC bitmap
10937  *
10938  * Returns 0 on success, negative value on failure
10939  */
10940 static enum i40e_status_code
10941 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10942 {
10943         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10944         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10945         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10946         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10947         enum i40e_status_code ret = I40E_SUCCESS;
10948         int i;
10949         uint32_t bw_max;
10950
10951         /* Check if enabled_tc is same as existing or new TCs */
10952         if (veb->enabled_tc == tc_map)
10953                 return ret;
10954
10955         /* configure tc bandwidth */
10956         memset(&veb_bw, 0, sizeof(veb_bw));
10957         veb_bw.tc_valid_bits = tc_map;
10958         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10959         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10960                 if (tc_map & BIT_ULL(i))
10961                         veb_bw.tc_bw_share_credits[i] = 1;
10962         }
10963         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10964                                                    &veb_bw, NULL);
10965         if (ret) {
10966                 PMD_INIT_LOG(ERR,
10967                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10968                         hw->aq.asq_last_status);
10969                 return ret;
10970         }
10971
10972         memset(&ets_query, 0, sizeof(ets_query));
10973         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10974                                                    &ets_query, NULL);
10975         if (ret != I40E_SUCCESS) {
10976                 PMD_DRV_LOG(ERR,
10977                         "Failed to get switch_comp ETS configuration %u",
10978                         hw->aq.asq_last_status);
10979                 return ret;
10980         }
10981         memset(&bw_query, 0, sizeof(bw_query));
10982         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10983                                                   &bw_query, NULL);
10984         if (ret != I40E_SUCCESS) {
10985                 PMD_DRV_LOG(ERR,
10986                         "Failed to get switch_comp bandwidth configuration %u",
10987                         hw->aq.asq_last_status);
10988                 return ret;
10989         }
10990
10991         /* store and print out BW info */
10992         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10993         veb->bw_info.bw_max = ets_query.tc_bw_max;
10994         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10995         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10996         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10997                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10998                      I40E_16_BIT_WIDTH);
10999         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11000                 veb->bw_info.bw_ets_share_credits[i] =
11001                                 bw_query.tc_bw_share_credits[i];
11002                 veb->bw_info.bw_ets_credits[i] =
11003                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
11004                 /* 4 bits per TC, 4th bit is reserved */
11005                 veb->bw_info.bw_ets_max[i] =
11006                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
11007                                   RTE_LEN2MASK(3, uint8_t));
11008                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
11009                             veb->bw_info.bw_ets_share_credits[i]);
11010                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
11011                             veb->bw_info.bw_ets_credits[i]);
11012                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
11013                             veb->bw_info.bw_ets_max[i]);
11014         }
11015
11016         veb->enabled_tc = tc_map;
11017
11018         return ret;
11019 }
11020
11021
11022 /*
11023  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
11024  * @vsi: VSI to be configured
11025  * @tc_map: enabled TC bitmap
11026  *
11027  * Returns 0 on success, negative value on failure
11028  */
11029 static enum i40e_status_code
11030 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
11031 {
11032         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
11033         struct i40e_vsi_context ctxt;
11034         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
11035         enum i40e_status_code ret = I40E_SUCCESS;
11036         int i;
11037
11038         /* Check if enabled_tc is same as existing or new TCs */
11039         if (vsi->enabled_tc == tc_map)
11040                 return ret;
11041
11042         /* configure tc bandwidth */
11043         memset(&bw_data, 0, sizeof(bw_data));
11044         bw_data.tc_valid_bits = tc_map;
11045         /* Enable ETS TCs with equal BW Share for now across all VSIs */
11046         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11047                 if (tc_map & BIT_ULL(i))
11048                         bw_data.tc_bw_credits[i] = 1;
11049         }
11050         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
11051         if (ret) {
11052                 PMD_INIT_LOG(ERR,
11053                         "AQ command Config VSI BW allocation per TC failed = %d",
11054                         hw->aq.asq_last_status);
11055                 goto out;
11056         }
11057         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
11058                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
11059
11060         /* Update Queue Pairs Mapping for currently enabled UPs */
11061         ctxt.seid = vsi->seid;
11062         ctxt.pf_num = hw->pf_id;
11063         ctxt.vf_num = 0;
11064         ctxt.uplink_seid = vsi->uplink_seid;
11065         ctxt.info = vsi->info;
11066         i40e_get_cap(hw);
11067         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
11068         if (ret)
11069                 goto out;
11070
11071         /* Update the VSI after updating the VSI queue-mapping information */
11072         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11073         if (ret) {
11074                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
11075                         hw->aq.asq_last_status);
11076                 goto out;
11077         }
11078         /* update the local VSI info with updated queue map */
11079         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
11080                                         sizeof(vsi->info.tc_mapping));
11081         rte_memcpy(&vsi->info.queue_mapping,
11082                         &ctxt.info.queue_mapping,
11083                 sizeof(vsi->info.queue_mapping));
11084         vsi->info.mapping_flags = ctxt.info.mapping_flags;
11085         vsi->info.valid_sections = 0;
11086
11087         /* query and update current VSI BW information */
11088         ret = i40e_vsi_get_bw_config(vsi);
11089         if (ret) {
11090                 PMD_INIT_LOG(ERR,
11091                          "Failed updating vsi bw info, err %s aq_err %s",
11092                          i40e_stat_str(hw, ret),
11093                          i40e_aq_str(hw, hw->aq.asq_last_status));
11094                 goto out;
11095         }
11096
11097         vsi->enabled_tc = tc_map;
11098
11099 out:
11100         return ret;
11101 }
11102
11103 /*
11104  * i40e_dcb_hw_configure - program the dcb setting to hw
11105  * @pf: pf the configuration is taken on
11106  * @new_cfg: new configuration
11107  * @tc_map: enabled TC bitmap
11108  *
11109  * Returns 0 on success, negative value on failure
11110  */
11111 static enum i40e_status_code
11112 i40e_dcb_hw_configure(struct i40e_pf *pf,
11113                       struct i40e_dcbx_config *new_cfg,
11114                       uint8_t tc_map)
11115 {
11116         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11117         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11118         struct i40e_vsi *main_vsi = pf->main_vsi;
11119         struct i40e_vsi_list *vsi_list;
11120         enum i40e_status_code ret;
11121         int i;
11122         uint32_t val;
11123
11124         /* Use the FW API if FW > v4.4*/
11125         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11126               (hw->aq.fw_maj_ver >= 5))) {
11127                 PMD_INIT_LOG(ERR,
11128                         "FW < v4.4, can not use FW LLDP API to configure DCB");
11129                 return I40E_ERR_FIRMWARE_API_VERSION;
11130         }
11131
11132         /* Check if need reconfiguration */
11133         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11134                 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
11135                 return I40E_SUCCESS;
11136         }
11137
11138         /* Copy the new config to the current config */
11139         *old_cfg = *new_cfg;
11140         old_cfg->etsrec = old_cfg->etscfg;
11141         ret = i40e_set_dcb_config(hw);
11142         if (ret) {
11143                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11144                          i40e_stat_str(hw, ret),
11145                          i40e_aq_str(hw, hw->aq.asq_last_status));
11146                 return ret;
11147         }
11148         /* set receive Arbiter to RR mode and ETS scheme by default */
11149         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11150                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11151                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11152                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11153                          I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
11154                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11155                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11156                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11157                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11158                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11159                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11160                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11161                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11162         }
11163         /* get local mib to check whether it is configured correctly */
11164         /* IEEE mode */
11165         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11166         /* Get Local DCB Config */
11167         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11168                                      &hw->local_dcbx_config);
11169
11170         /* if Veb is created, need to update TC of it at first */
11171         if (main_vsi->veb) {
11172                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11173                 if (ret)
11174                         PMD_INIT_LOG(WARNING,
11175                                  "Failed configuring TC for VEB seid=%d",
11176                                  main_vsi->veb->seid);
11177         }
11178         /* Update each VSI */
11179         i40e_vsi_config_tc(main_vsi, tc_map);
11180         if (main_vsi->veb) {
11181                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11182                         /* Beside main VSI and VMDQ VSIs, only enable default
11183                          * TC for other VSIs
11184                          */
11185                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11186                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11187                                                          tc_map);
11188                         else
11189                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11190                                                          I40E_DEFAULT_TCMAP);
11191                         if (ret)
11192                                 PMD_INIT_LOG(WARNING,
11193                                         "Failed configuring TC for VSI seid=%d",
11194                                         vsi_list->vsi->seid);
11195                         /* continue */
11196                 }
11197         }
11198         return I40E_SUCCESS;
11199 }
11200
11201 /*
11202  * i40e_dcb_init_configure - initial dcb config
11203  * @dev: device being configured
11204  * @sw_dcb: indicate whether dcb is sw configured or hw offload
11205  *
11206  * Returns 0 on success, negative value on failure
11207  */
11208 int
11209 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11210 {
11211         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11212         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11213         int i, ret = 0;
11214
11215         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11216                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11217                 return -ENOTSUP;
11218         }
11219
11220         /* DCB initialization:
11221          * Update DCB configuration from the Firmware and configure
11222          * LLDP MIB change event.
11223          */
11224         if (sw_dcb == TRUE) {
11225                 /* Stopping lldp is necessary for DPDK, but it will cause
11226                  * DCB init failed. For i40e_init_dcb(), the prerequisite
11227                  * for successful initialization of DCB is that LLDP is
11228                  * enabled. So it is needed to start lldp before DCB init
11229                  * and stop it after initialization.
11230                  */
11231                 ret = i40e_aq_start_lldp(hw, true, NULL);
11232                 if (ret != I40E_SUCCESS)
11233                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11234
11235                 ret = i40e_init_dcb(hw, true);
11236                 /* If lldp agent is stopped, the return value from
11237                  * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
11238                  * adminq status. Otherwise, it should return success.
11239                  */
11240                 if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
11241                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
11242                         memset(&hw->local_dcbx_config, 0,
11243                                 sizeof(struct i40e_dcbx_config));
11244                         /* set dcb default configuration */
11245                         hw->local_dcbx_config.etscfg.willing = 0;
11246                         hw->local_dcbx_config.etscfg.maxtcs = 0;
11247                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11248                         hw->local_dcbx_config.etscfg.tsatable[0] =
11249                                                 I40E_IEEE_TSA_ETS;
11250                         /* all UPs mapping to TC0 */
11251                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11252                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11253                         hw->local_dcbx_config.etsrec =
11254                                 hw->local_dcbx_config.etscfg;
11255                         hw->local_dcbx_config.pfc.willing = 0;
11256                         hw->local_dcbx_config.pfc.pfccap =
11257                                                 I40E_MAX_TRAFFIC_CLASS;
11258                         /* FW needs one App to configure HW */
11259                         hw->local_dcbx_config.numapps = 1;
11260                         hw->local_dcbx_config.app[0].selector =
11261                                                 I40E_APP_SEL_ETHTYPE;
11262                         hw->local_dcbx_config.app[0].priority = 3;
11263                         hw->local_dcbx_config.app[0].protocolid =
11264                                                 I40E_APP_PROTOID_FCOE;
11265                         ret = i40e_set_dcb_config(hw);
11266                         if (ret) {
11267                                 PMD_INIT_LOG(ERR,
11268                                         "default dcb config fails. err = %d, aq_err = %d.",
11269                                         ret, hw->aq.asq_last_status);
11270                                 return -ENOSYS;
11271                         }
11272                 } else {
11273                         PMD_INIT_LOG(ERR,
11274                                 "DCB initialization in FW fails, err = %d, aq_err = %d.",
11275                                 ret, hw->aq.asq_last_status);
11276                         return -ENOTSUP;
11277                 }
11278
11279                 if (i40e_need_stop_lldp(dev)) {
11280                         ret = i40e_aq_stop_lldp(hw, true, true, NULL);
11281                         if (ret != I40E_SUCCESS)
11282                                 PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
11283                 }
11284         } else {
11285                 ret = i40e_aq_start_lldp(hw, true, NULL);
11286                 if (ret != I40E_SUCCESS)
11287                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11288
11289                 ret = i40e_init_dcb(hw, true);
11290                 if (!ret) {
11291                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11292                                 PMD_INIT_LOG(ERR,
11293                                         "HW doesn't support DCBX offload.");
11294                                 return -ENOTSUP;
11295                         }
11296                 } else {
11297                         PMD_INIT_LOG(ERR,
11298                                 "DCBX configuration failed, err = %d, aq_err = %d.",
11299                                 ret, hw->aq.asq_last_status);
11300                         return -ENOTSUP;
11301                 }
11302         }
11303         return 0;
11304 }
11305
11306 /*
11307  * i40e_dcb_setup - setup dcb related config
11308  * @dev: device being configured
11309  *
11310  * Returns 0 on success, negative value on failure
11311  */
11312 static int
11313 i40e_dcb_setup(struct rte_eth_dev *dev)
11314 {
11315         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11316         struct i40e_dcbx_config dcb_cfg;
11317         uint8_t tc_map = 0;
11318         int ret = 0;
11319
11320         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11321                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11322                 return -ENOTSUP;
11323         }
11324
11325         if (pf->vf_num != 0)
11326                 PMD_INIT_LOG(DEBUG, " DCB only works on pf and vmdq vsis.");
11327
11328         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11329         if (ret) {
11330                 PMD_INIT_LOG(ERR, "invalid dcb config");
11331                 return -EINVAL;
11332         }
11333         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11334         if (ret) {
11335                 PMD_INIT_LOG(ERR, "dcb sw configure fails");
11336                 return -ENOSYS;
11337         }
11338
11339         return 0;
11340 }
11341
11342 static int
11343 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11344                       struct rte_eth_dcb_info *dcb_info)
11345 {
11346         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11347         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11348         struct i40e_vsi *vsi = pf->main_vsi;
11349         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11350         uint16_t bsf, tc_mapping;
11351         int i, j = 0;
11352
11353         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11354                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11355         else
11356                 dcb_info->nb_tcs = 1;
11357         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11358                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11359         for (i = 0; i < dcb_info->nb_tcs; i++)
11360                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11361
11362         /* get queue mapping if vmdq is disabled */
11363         if (!pf->nb_cfg_vmdq_vsi) {
11364                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11365                         if (!(vsi->enabled_tc & (1 << i)))
11366                                 continue;
11367                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11368                         dcb_info->tc_queue.tc_rxq[j][i].base =
11369                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11370                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11371                         dcb_info->tc_queue.tc_txq[j][i].base =
11372                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11373                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11374                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11375                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11376                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11377                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11378                 }
11379                 return 0;
11380         }
11381
11382         /* get queue mapping if vmdq is enabled */
11383         do {
11384                 vsi = pf->vmdq[j].vsi;
11385                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11386                         if (!(vsi->enabled_tc & (1 << i)))
11387                                 continue;
11388                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11389                         dcb_info->tc_queue.tc_rxq[j][i].base =
11390                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11391                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11392                         dcb_info->tc_queue.tc_txq[j][i].base =
11393                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11394                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11395                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11396                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11397                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11398                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11399                 }
11400                 j++;
11401         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11402         return 0;
11403 }
11404
11405 static int
11406 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11407 {
11408         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11409         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11410         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11411         uint16_t msix_intr;
11412
11413         msix_intr = intr_handle->intr_vec[queue_id];
11414         if (msix_intr == I40E_MISC_VEC_ID)
11415                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11416                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
11417                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11418                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11419         else
11420                 I40E_WRITE_REG(hw,
11421                                I40E_PFINT_DYN_CTLN(msix_intr -
11422                                                    I40E_RX_VEC_START),
11423                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
11424                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11425                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11426
11427         I40E_WRITE_FLUSH(hw);
11428         rte_intr_ack(&pci_dev->intr_handle);
11429
11430         return 0;
11431 }
11432
11433 static int
11434 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11435 {
11436         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11437         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11438         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11439         uint16_t msix_intr;
11440
11441         msix_intr = intr_handle->intr_vec[queue_id];
11442         if (msix_intr == I40E_MISC_VEC_ID)
11443                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11444                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11445         else
11446                 I40E_WRITE_REG(hw,
11447                                I40E_PFINT_DYN_CTLN(msix_intr -
11448                                                    I40E_RX_VEC_START),
11449                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11450         I40E_WRITE_FLUSH(hw);
11451
11452         return 0;
11453 }
11454
11455 /**
11456  * This function is used to check if the register is valid.
11457  * Below is the valid registers list for X722 only:
11458  * 0x2b800--0x2bb00
11459  * 0x38700--0x38a00
11460  * 0x3d800--0x3db00
11461  * 0x208e00--0x209000
11462  * 0x20be00--0x20c000
11463  * 0x263c00--0x264000
11464  * 0x265c00--0x266000
11465  */
11466 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
11467 {
11468         if ((type != I40E_MAC_X722) &&
11469             ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
11470              (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
11471              (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
11472              (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
11473              (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
11474              (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
11475              (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
11476                 return 0;
11477         else
11478                 return 1;
11479 }
11480
11481 static int i40e_get_regs(struct rte_eth_dev *dev,
11482                          struct rte_dev_reg_info *regs)
11483 {
11484         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11485         uint32_t *ptr_data = regs->data;
11486         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11487         const struct i40e_reg_info *reg_info;
11488
11489         if (ptr_data == NULL) {
11490                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11491                 regs->width = sizeof(uint32_t);
11492                 return 0;
11493         }
11494
11495         /* The first few registers have to be read using AQ operations */
11496         reg_idx = 0;
11497         while (i40e_regs_adminq[reg_idx].name) {
11498                 reg_info = &i40e_regs_adminq[reg_idx++];
11499                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11500                         for (arr_idx2 = 0;
11501                                         arr_idx2 <= reg_info->count2;
11502                                         arr_idx2++) {
11503                                 reg_offset = arr_idx * reg_info->stride1 +
11504                                         arr_idx2 * reg_info->stride2;
11505                                 reg_offset += reg_info->base_addr;
11506                                 ptr_data[reg_offset >> 2] =
11507                                         i40e_read_rx_ctl(hw, reg_offset);
11508                         }
11509         }
11510
11511         /* The remaining registers can be read using primitives */
11512         reg_idx = 0;
11513         while (i40e_regs_others[reg_idx].name) {
11514                 reg_info = &i40e_regs_others[reg_idx++];
11515                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11516                         for (arr_idx2 = 0;
11517                                         arr_idx2 <= reg_info->count2;
11518                                         arr_idx2++) {
11519                                 reg_offset = arr_idx * reg_info->stride1 +
11520                                         arr_idx2 * reg_info->stride2;
11521                                 reg_offset += reg_info->base_addr;
11522                                 if (!i40e_valid_regs(hw->mac.type, reg_offset))
11523                                         ptr_data[reg_offset >> 2] = 0;
11524                                 else
11525                                         ptr_data[reg_offset >> 2] =
11526                                                 I40E_READ_REG(hw, reg_offset);
11527                         }
11528         }
11529
11530         return 0;
11531 }
11532
11533 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11534 {
11535         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11536
11537         /* Convert word count to byte count */
11538         return hw->nvm.sr_size << 1;
11539 }
11540
11541 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11542                            struct rte_dev_eeprom_info *eeprom)
11543 {
11544         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11545         uint16_t *data = eeprom->data;
11546         uint16_t offset, length, cnt_words;
11547         int ret_code;
11548
11549         offset = eeprom->offset >> 1;
11550         length = eeprom->length >> 1;
11551         cnt_words = length;
11552
11553         if (offset > hw->nvm.sr_size ||
11554                 offset + length > hw->nvm.sr_size) {
11555                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11556                 return -EINVAL;
11557         }
11558
11559         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11560
11561         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11562         if (ret_code != I40E_SUCCESS || cnt_words != length) {
11563                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
11564                 return -EIO;
11565         }
11566
11567         return 0;
11568 }
11569
/* Identify the plugged transceiver module (SFP/QSFP) and report which
 * SFF EEPROM layout and length apply, for rte_eth_dev_get_module_info().
 *
 * Probes the module's compliance registers over the I2C bus via the
 * PHY-register-access admin queue command, so it requires firmware with
 * that capability.
 *
 * Returns 0 on success, -EINVAL when unsupported / no module present /
 * module type unknown, -EIO on admin-queue failure.
 */
static int i40e_get_module_info(struct rte_eth_dev *dev,
				struct rte_eth_dev_module_info *modinfo)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t sff8472_comp = 0;
	uint32_t sff8472_swap = 0;
	uint32_t sff8636_rev = 0;
	i40e_status status;
	uint32_t type = 0;

	/* Check if firmware supports reading module EEPROM. */
	if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
		PMD_DRV_LOG(ERR,
			    "Module EEPROM memory read not supported. "
			    "Please update the NVM image.\n");
		return -EINVAL;
	}

	/* Refresh link/module info so module_type[] below is current. */
	status = i40e_update_link_info(hw);
	if (status)
		return -EIO;

	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
		PMD_DRV_LOG(ERR,
			    "Cannot read module EEPROM memory. "
			    "No module connected.\n");
		return -EINVAL;
	}

	type = hw->phy.link_info.module_type[0];

	switch (type) {
	case I40E_MODULE_TYPE_SFP:
		/* Read the SFF-8472 compliance byte from the module. */
		status = i40e_aq_get_phy_register(hw,
				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
				I40E_I2C_EEPROM_DEV_ADDR, 1,
				I40E_MODULE_SFF_8472_COMP,
				&sff8472_comp, NULL);
		if (status)
			return -EIO;

		/* Read the diagnostic-monitoring (addressing mode) byte. */
		status = i40e_aq_get_phy_register(hw,
				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
				I40E_I2C_EEPROM_DEV_ADDR, 1,
				I40E_MODULE_SFF_8472_SWAP,
				&sff8472_swap, NULL);
		if (status)
			return -EIO;

		/* Check if the module requires address swap to access
		 * the other EEPROM memory page.
		 */
		if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
			PMD_DRV_LOG(WARNING,
				    "Module address swap to access "
				    "page 0xA2 is not supported.\n");
			/* Fall back to the single-page SFF-8079 layout. */
			modinfo->type = RTE_ETH_MODULE_SFF_8079;
			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
		} else if (sff8472_comp == 0x00) {
			/* Module is not SFF-8472 compliant */
			modinfo->type = RTE_ETH_MODULE_SFF_8079;
			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
		} else {
			modinfo->type = RTE_ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
		}
		break;
	case I40E_MODULE_TYPE_QSFP_PLUS:
		/* Read from memory page 0. */
		status = i40e_aq_get_phy_register(hw,
				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
				0, 1,
				I40E_MODULE_REVISION_ADDR,
				&sff8636_rev, NULL);
		if (status)
			return -EIO;
		/* Determine revision compliance byte */
		if (sff8636_rev > 0x02) {
			/* Module is SFF-8636 compliant */
			modinfo->type = RTE_ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
		} else {
			modinfo->type = RTE_ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
		}
		break;
	case I40E_MODULE_TYPE_QSFP28:
		/* QSFP28 always uses the SFF-8636 layout. */
		modinfo->type = RTE_ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
		break;
	default:
		PMD_DRV_LOG(ERR, "Module type unrecognized\n");
		return -EINVAL;
	}
	return 0;
}
11666
11667 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11668                                   struct rte_dev_eeprom_info *info)
11669 {
11670         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11671         bool is_sfp = false;
11672         i40e_status status;
11673         uint8_t *data;
11674         uint32_t value = 0;
11675         uint32_t i;
11676
11677         if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11678                 is_sfp = true;
11679
11680         data = info->data;
11681         for (i = 0; i < info->length; i++) {
11682                 u32 offset = i + info->offset;
11683                 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11684
11685                 /* Check if we need to access the other memory page */
11686                 if (is_sfp) {
11687                         if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11688                                 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11689                                 addr = I40E_I2C_EEPROM_DEV_ADDR2;
11690                         }
11691                 } else {
11692                         while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11693                                 /* Compute memory page number and offset. */
11694                                 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11695                                 addr++;
11696                         }
11697                 }
11698                 status = i40e_aq_get_phy_register(hw,
11699                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11700                                 addr, 1, offset, &value, NULL);
11701                 if (status)
11702                         return -EIO;
11703                 data[i] = (uint8_t)value;
11704         }
11705         return 0;
11706 }
11707
11708 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11709                                      struct rte_ether_addr *mac_addr)
11710 {
11711         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11712         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11713         struct i40e_vsi *vsi = pf->main_vsi;
11714         struct i40e_mac_filter_info mac_filter;
11715         struct i40e_mac_filter *f;
11716         int ret;
11717
11718         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
11719                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11720                 return -EINVAL;
11721         }
11722
11723         TAILQ_FOREACH(f, &vsi->mac_list, next) {
11724                 if (rte_is_same_ether_addr(&pf->dev_addr,
11725                                                 &f->mac_info.mac_addr))
11726                         break;
11727         }
11728
11729         if (f == NULL) {
11730                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11731                 return -EIO;
11732         }
11733
11734         mac_filter = f->mac_info;
11735         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11736         if (ret != I40E_SUCCESS) {
11737                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11738                 return -EIO;
11739         }
11740         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11741         ret = i40e_vsi_add_mac(vsi, &mac_filter);
11742         if (ret != I40E_SUCCESS) {
11743                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
11744                 return -EIO;
11745         }
11746         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11747
11748         ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11749                                         mac_addr->addr_bytes, NULL);
11750         if (ret != I40E_SUCCESS) {
11751                 PMD_DRV_LOG(ERR, "Failed to change mac");
11752                 return -EIO;
11753         }
11754
11755         return 0;
11756 }
11757
11758 static int
11759 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11760 {
11761         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11762         struct rte_eth_dev_data *dev_data = pf->dev_data;
11763         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11764         int ret = 0;
11765
11766         /* check if mtu is within the allowed range */
11767         if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
11768                 return -EINVAL;
11769
11770         /* mtu setting is forbidden if port is start */
11771         if (dev_data->dev_started) {
11772                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11773                             dev_data->port_id);
11774                 return -EBUSY;
11775         }
11776
11777         if (frame_size > I40E_ETH_MAX_LEN)
11778                 dev_data->dev_conf.rxmode.offloads |=
11779                         DEV_RX_OFFLOAD_JUMBO_FRAME;
11780         else
11781                 dev_data->dev_conf.rxmode.offloads &=
11782                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
11783
11784         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11785
11786         return ret;
11787 }
11788
11789 /* Restore ethertype filter */
11790 static void
11791 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11792 {
11793         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11794         struct i40e_ethertype_filter_list
11795                 *ethertype_list = &pf->ethertype.ethertype_list;
11796         struct i40e_ethertype_filter *f;
11797         struct i40e_control_filter_stats stats;
11798         uint16_t flags;
11799
11800         TAILQ_FOREACH(f, ethertype_list, rules) {
11801                 flags = 0;
11802                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11803                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11804                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11805                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11806                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11807
11808                 memset(&stats, 0, sizeof(stats));
11809                 i40e_aq_add_rem_control_packet_filter(hw,
11810                                             f->input.mac_addr.addr_bytes,
11811                                             f->input.ether_type,
11812                                             flags, pf->main_vsi->seid,
11813                                             f->queue, 1, &stats, NULL);
11814         }
11815         PMD_DRV_LOG(INFO, "Ethertype filter:"
11816                     " mac_etype_used = %u, etype_used = %u,"
11817                     " mac_etype_free = %u, etype_free = %u",
11818                     stats.mac_etype_used, stats.etype_used,
11819                     stats.mac_etype_free, stats.etype_free);
11820 }
11821
11822 /* Restore tunnel filter */
11823 static void
11824 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11825 {
11826         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11827         struct i40e_vsi *vsi;
11828         struct i40e_pf_vf *vf;
11829         struct i40e_tunnel_filter_list
11830                 *tunnel_list = &pf->tunnel.tunnel_list;
11831         struct i40e_tunnel_filter *f;
11832         struct i40e_aqc_cloud_filters_element_bb cld_filter;
11833         bool big_buffer = 0;
11834
11835         TAILQ_FOREACH(f, tunnel_list, rules) {
11836                 if (!f->is_to_vf)
11837                         vsi = pf->main_vsi;
11838                 else {
11839                         vf = &pf->vfs[f->vf_id];
11840                         vsi = vf->vsi;
11841                 }
11842                 memset(&cld_filter, 0, sizeof(cld_filter));
11843                 rte_ether_addr_copy((struct rte_ether_addr *)
11844                                 &f->input.outer_mac,
11845                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
11846                 rte_ether_addr_copy((struct rte_ether_addr *)
11847                                 &f->input.inner_mac,
11848                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
11849                 cld_filter.element.inner_vlan = f->input.inner_vlan;
11850                 cld_filter.element.flags = f->input.flags;
11851                 cld_filter.element.tenant_id = f->input.tenant_id;
11852                 cld_filter.element.queue_number = f->queue;
11853                 rte_memcpy(cld_filter.general_fields,
11854                            f->input.general_fields,
11855                            sizeof(f->input.general_fields));
11856
11857                 if (((f->input.flags &
11858                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11859                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11860                     ((f->input.flags &
11861                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11862                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11863                     ((f->input.flags &
11864                      I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11865                      I40E_AQC_ADD_CLOUD_FILTER_0X10))
11866                         big_buffer = 1;
11867
11868                 if (big_buffer)
11869                         i40e_aq_add_cloud_filters_bb(hw,
11870                                         vsi->seid, &cld_filter, 1);
11871                 else
11872                         i40e_aq_add_cloud_filters(hw, vsi->seid,
11873                                                   &cld_filter.element, 1);
11874         }
11875 }
11876
/* Replay all software-cached flow filters (ethertype, tunnel/cloud, FDIR
 * and RSS hash rules) into hardware, used when the on-chip tables have
 * been cleared (e.g. after a device reset/start).
 */
static void
i40e_filter_restore(struct i40e_pf *pf)
{
	i40e_ethertype_filter_restore(pf);
	i40e_tunnel_filter_restore(pf);
	i40e_fdir_filter_restore(pf);
	/* Cast shows the return value is intentionally ignored. */
	(void)i40e_hash_filter_restore(pf);
}
11885
11886 bool
11887 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11888 {
11889         if (strcmp(dev->device->driver->name, drv->driver.name))
11890                 return false;
11891
11892         return true;
11893 }
11894
/* Check whether 'dev' is driven by this i40e PMD (compared by driver
 * name via is_device_supported()).
 */
bool
is_i40e_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_i40e_pmd);
}
11900
11901 struct i40e_customized_pctype*
11902 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11903 {
11904         int i;
11905
11906         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11907                 if (pf->customized_pctype[i].index == index)
11908                         return &pf->customized_pctype[i];
11909         }
11910         return NULL;
11911 }
11912
11913 static int
11914 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11915                               uint32_t pkg_size, uint32_t proto_num,
11916                               struct rte_pmd_i40e_proto_info *proto,
11917                               enum rte_pmd_i40e_package_op op)
11918 {
11919         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11920         uint32_t pctype_num;
11921         struct rte_pmd_i40e_ptype_info *pctype;
11922         uint32_t buff_size;
11923         struct i40e_customized_pctype *new_pctype = NULL;
11924         uint8_t proto_id;
11925         uint8_t pctype_value;
11926         char name[64];
11927         uint32_t i, j, n;
11928         int ret;
11929
11930         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11931             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11932                 PMD_DRV_LOG(ERR, "Unsupported operation.");
11933                 return -1;
11934         }
11935
11936         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11937                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
11938                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11939         if (ret) {
11940                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
11941                 return -1;
11942         }
11943         if (!pctype_num) {
11944                 PMD_DRV_LOG(INFO, "No new pctype added");
11945                 return -1;
11946         }
11947
11948         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
11949         pctype = rte_zmalloc("new_pctype", buff_size, 0);
11950         if (!pctype) {
11951                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11952                 return -1;
11953         }
11954         /* get information about new pctype list */
11955         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11956                                         (uint8_t *)pctype, buff_size,
11957                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11958         if (ret) {
11959                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
11960                 rte_free(pctype);
11961                 return -1;
11962         }
11963
11964         /* Update customized pctype. */
11965         for (i = 0; i < pctype_num; i++) {
11966                 pctype_value = pctype[i].ptype_id;
11967                 memset(name, 0, sizeof(name));
11968                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11969                         proto_id = pctype[i].protocols[j];
11970                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11971                                 continue;
11972                         for (n = 0; n < proto_num; n++) {
11973                                 if (proto[n].proto_id != proto_id)
11974                                         continue;
11975                                 strlcat(name, proto[n].name, sizeof(name));
11976                                 strlcat(name, "_", sizeof(name));
11977                                 break;
11978                         }
11979                 }
11980                 name[strlen(name) - 1] = '\0';
11981                 PMD_DRV_LOG(INFO, "name = %s\n", name);
11982                 if (!strcmp(name, "GTPC"))
11983                         new_pctype =
11984                                 i40e_find_customized_pctype(pf,
11985                                                       I40E_CUSTOMIZED_GTPC);
11986                 else if (!strcmp(name, "GTPU_IPV4"))
11987                         new_pctype =
11988                                 i40e_find_customized_pctype(pf,
11989                                                    I40E_CUSTOMIZED_GTPU_IPV4);
11990                 else if (!strcmp(name, "GTPU_IPV6"))
11991                         new_pctype =
11992                                 i40e_find_customized_pctype(pf,
11993                                                    I40E_CUSTOMIZED_GTPU_IPV6);
11994                 else if (!strcmp(name, "GTPU"))
11995                         new_pctype =
11996                                 i40e_find_customized_pctype(pf,
11997                                                       I40E_CUSTOMIZED_GTPU);
11998                 else if (!strcmp(name, "IPV4_L2TPV3"))
11999                         new_pctype =
12000                                 i40e_find_customized_pctype(pf,
12001                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
12002                 else if (!strcmp(name, "IPV6_L2TPV3"))
12003                         new_pctype =
12004                                 i40e_find_customized_pctype(pf,
12005                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
12006                 else if (!strcmp(name, "IPV4_ESP"))
12007                         new_pctype =
12008                                 i40e_find_customized_pctype(pf,
12009                                                 I40E_CUSTOMIZED_ESP_IPV4);
12010                 else if (!strcmp(name, "IPV6_ESP"))
12011                         new_pctype =
12012                                 i40e_find_customized_pctype(pf,
12013                                                 I40E_CUSTOMIZED_ESP_IPV6);
12014                 else if (!strcmp(name, "IPV4_UDP_ESP"))
12015                         new_pctype =
12016                                 i40e_find_customized_pctype(pf,
12017                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
12018                 else if (!strcmp(name, "IPV6_UDP_ESP"))
12019                         new_pctype =
12020                                 i40e_find_customized_pctype(pf,
12021                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
12022                 else if (!strcmp(name, "IPV4_AH"))
12023                         new_pctype =
12024                                 i40e_find_customized_pctype(pf,
12025                                                 I40E_CUSTOMIZED_AH_IPV4);
12026                 else if (!strcmp(name, "IPV6_AH"))
12027                         new_pctype =
12028                                 i40e_find_customized_pctype(pf,
12029                                                 I40E_CUSTOMIZED_AH_IPV6);
12030                 if (new_pctype) {
12031                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
12032                                 new_pctype->pctype = pctype_value;
12033                                 new_pctype->valid = true;
12034                         } else {
12035                                 new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
12036                                 new_pctype->valid = false;
12037                         }
12038                 }
12039         }
12040
12041         rte_free(pctype);
12042         return 0;
12043 }
12044
12045 static int
12046 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
12047                              uint32_t pkg_size, uint32_t proto_num,
12048                              struct rte_pmd_i40e_proto_info *proto,
12049                              enum rte_pmd_i40e_package_op op)
12050 {
12051         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
12052         uint16_t port_id = dev->data->port_id;
12053         uint32_t ptype_num;
12054         struct rte_pmd_i40e_ptype_info *ptype;
12055         uint32_t buff_size;
12056         uint8_t proto_id;
12057         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
12058         uint32_t i, j, n;
12059         bool in_tunnel;
12060         int ret;
12061
12062         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12063             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12064                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12065                 return -1;
12066         }
12067
12068         if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
12069                 rte_pmd_i40e_ptype_mapping_reset(port_id);
12070                 return 0;
12071         }
12072
12073         /* get information about new ptype num */
12074         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12075                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
12076                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
12077         if (ret) {
12078                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
12079                 return ret;
12080         }
12081         if (!ptype_num) {
12082                 PMD_DRV_LOG(INFO, "No new ptype added");
12083                 return -1;
12084         }
12085
12086         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
12087         ptype = rte_zmalloc("new_ptype", buff_size, 0);
12088         if (!ptype) {
12089                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12090                 return -1;
12091         }
12092
12093         /* get information about new ptype list */
12094         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12095                                         (uint8_t *)ptype, buff_size,
12096                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
12097         if (ret) {
12098                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
12099                 rte_free(ptype);
12100                 return ret;
12101         }
12102
12103         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
12104         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
12105         if (!ptype_mapping) {
12106                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12107                 rte_free(ptype);
12108                 return -1;
12109         }
12110
12111         /* Update ptype mapping table. */
12112         for (i = 0; i < ptype_num; i++) {
12113                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
12114                 ptype_mapping[i].sw_ptype = 0;
12115                 in_tunnel = false;
12116                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12117                         proto_id = ptype[i].protocols[j];
12118                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12119                                 continue;
12120                         for (n = 0; n < proto_num; n++) {
12121                                 if (proto[n].proto_id != proto_id)
12122                                         continue;
12123                                 memset(name, 0, sizeof(name));
12124                                 strcpy(name, proto[n].name);
12125                                 PMD_DRV_LOG(INFO, "name = %s\n", name);
12126                                 if (!strncasecmp(name, "PPPOE", 5))
12127                                         ptype_mapping[i].sw_ptype |=
12128                                                 RTE_PTYPE_L2_ETHER_PPPOE;
12129                                 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12130                                          !in_tunnel) {
12131                                         ptype_mapping[i].sw_ptype |=
12132                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12133                                         ptype_mapping[i].sw_ptype |=
12134                                                 RTE_PTYPE_L4_FRAG;
12135                                 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12136                                            in_tunnel) {
12137                                         ptype_mapping[i].sw_ptype |=
12138                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12139                                         ptype_mapping[i].sw_ptype |=
12140                                                 RTE_PTYPE_INNER_L4_FRAG;
12141                                 } else if (!strncasecmp(name, "OIPV4", 5)) {
12142                                         ptype_mapping[i].sw_ptype |=
12143                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12144                                         in_tunnel = true;
12145                                 } else if (!strncasecmp(name, "IPV4", 4) &&
12146                                            !in_tunnel)
12147                                         ptype_mapping[i].sw_ptype |=
12148                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12149                                 else if (!strncasecmp(name, "IPV4", 4) &&
12150                                          in_tunnel)
12151                                         ptype_mapping[i].sw_ptype |=
12152                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12153                                 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12154                                          !in_tunnel) {
12155                                         ptype_mapping[i].sw_ptype |=
12156                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12157                                         ptype_mapping[i].sw_ptype |=
12158                                                 RTE_PTYPE_L4_FRAG;
12159                                 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12160                                            in_tunnel) {
12161                                         ptype_mapping[i].sw_ptype |=
12162                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12163                                         ptype_mapping[i].sw_ptype |=
12164                                                 RTE_PTYPE_INNER_L4_FRAG;
12165                                 } else if (!strncasecmp(name, "OIPV6", 5)) {
12166                                         ptype_mapping[i].sw_ptype |=
12167                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12168                                         in_tunnel = true;
12169                                 } else if (!strncasecmp(name, "IPV6", 4) &&
12170                                            !in_tunnel)
12171                                         ptype_mapping[i].sw_ptype |=
12172                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12173                                 else if (!strncasecmp(name, "IPV6", 4) &&
12174                                          in_tunnel)
12175                                         ptype_mapping[i].sw_ptype |=
12176                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12177                                 else if (!strncasecmp(name, "UDP", 3) &&
12178                                          !in_tunnel)
12179                                         ptype_mapping[i].sw_ptype |=
12180                                                 RTE_PTYPE_L4_UDP;
12181                                 else if (!strncasecmp(name, "UDP", 3) &&
12182                                          in_tunnel)
12183                                         ptype_mapping[i].sw_ptype |=
12184                                                 RTE_PTYPE_INNER_L4_UDP;
12185                                 else if (!strncasecmp(name, "TCP", 3) &&
12186                                          !in_tunnel)
12187                                         ptype_mapping[i].sw_ptype |=
12188                                                 RTE_PTYPE_L4_TCP;
12189                                 else if (!strncasecmp(name, "TCP", 3) &&
12190                                          in_tunnel)
12191                                         ptype_mapping[i].sw_ptype |=
12192                                                 RTE_PTYPE_INNER_L4_TCP;
12193                                 else if (!strncasecmp(name, "SCTP", 4) &&
12194                                          !in_tunnel)
12195                                         ptype_mapping[i].sw_ptype |=
12196                                                 RTE_PTYPE_L4_SCTP;
12197                                 else if (!strncasecmp(name, "SCTP", 4) &&
12198                                          in_tunnel)
12199                                         ptype_mapping[i].sw_ptype |=
12200                                                 RTE_PTYPE_INNER_L4_SCTP;
12201                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12202                                           !strncasecmp(name, "ICMPV6", 6)) &&
12203                                          !in_tunnel)
12204                                         ptype_mapping[i].sw_ptype |=
12205                                                 RTE_PTYPE_L4_ICMP;
12206                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12207                                           !strncasecmp(name, "ICMPV6", 6)) &&
12208                                          in_tunnel)
12209                                         ptype_mapping[i].sw_ptype |=
12210                                                 RTE_PTYPE_INNER_L4_ICMP;
12211                                 else if (!strncasecmp(name, "GTPC", 4)) {
12212                                         ptype_mapping[i].sw_ptype |=
12213                                                 RTE_PTYPE_TUNNEL_GTPC;
12214                                         in_tunnel = true;
12215                                 } else if (!strncasecmp(name, "GTPU", 4)) {
12216                                         ptype_mapping[i].sw_ptype |=
12217                                                 RTE_PTYPE_TUNNEL_GTPU;
12218                                         in_tunnel = true;
12219                                 } else if (!strncasecmp(name, "ESP", 3)) {
12220                                         ptype_mapping[i].sw_ptype |=
12221                                                 RTE_PTYPE_TUNNEL_ESP;
12222                                         in_tunnel = true;
12223                                 } else if (!strncasecmp(name, "GRENAT", 6)) {
12224                                         ptype_mapping[i].sw_ptype |=
12225                                                 RTE_PTYPE_TUNNEL_GRENAT;
12226                                         in_tunnel = true;
12227                                 } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12228                                            !strncasecmp(name, "L2TPV2", 6) ||
12229                                            !strncasecmp(name, "L2TPV3", 6)) {
12230                                         ptype_mapping[i].sw_ptype |=
12231                                                 RTE_PTYPE_TUNNEL_L2TP;
12232                                         in_tunnel = true;
12233                                 }
12234
12235                                 break;
12236                         }
12237                 }
12238         }
12239
12240         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
12241                                                 ptype_num, 0);
12242         if (ret)
12243                 PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
12244
12245         rte_free(ptype_mapping);
12246         rte_free(ptype);
12247         return ret;
12248 }
12249
12250 void
12251 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
12252                             uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
12253 {
12254         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12255         uint32_t proto_num;
12256         struct rte_pmd_i40e_proto_info *proto;
12257         uint32_t buff_size;
12258         uint32_t i;
12259         int ret;
12260
12261         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12262             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12263                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12264                 return;
12265         }
12266
12267         /* get information about protocol number */
12268         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12269                                        (uint8_t *)&proto_num, sizeof(proto_num),
12270                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
12271         if (ret) {
12272                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
12273                 return;
12274         }
12275         if (!proto_num) {
12276                 PMD_DRV_LOG(INFO, "No new protocol added");
12277                 return;
12278         }
12279
12280         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
12281         proto = rte_zmalloc("new_proto", buff_size, 0);
12282         if (!proto) {
12283                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12284                 return;
12285         }
12286
12287         /* get information about protocol list */
12288         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12289                                         (uint8_t *)proto, buff_size,
12290                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
12291         if (ret) {
12292                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
12293                 rte_free(proto);
12294                 return;
12295         }
12296
12297         /* Check if GTP is supported. */
12298         for (i = 0; i < proto_num; i++) {
12299                 if (!strncmp(proto[i].name, "GTP", 3)) {
12300                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12301                                 pf->gtp_support = true;
12302                         else
12303                                 pf->gtp_support = false;
12304                         break;
12305                 }
12306         }
12307
12308         /* Check if ESP is supported. */
12309         for (i = 0; i < proto_num; i++) {
12310                 if (!strncmp(proto[i].name, "ESP", 3)) {
12311                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12312                                 pf->esp_support = true;
12313                         else
12314                                 pf->esp_support = false;
12315                         break;
12316                 }
12317         }
12318
12319         /* Update customized pctype info */
12320         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12321                                             proto_num, proto, op);
12322         if (ret)
12323                 PMD_DRV_LOG(INFO, "No pctype is updated.");
12324
12325         /* Update customized ptype info */
12326         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12327                                            proto_num, proto, op);
12328         if (ret)
12329                 PMD_DRV_LOG(INFO, "No ptype is updated.");
12330
12331         rte_free(proto);
12332 }
12333
12334 /* Create a QinQ cloud filter
12335  *
12336  * The Fortville NIC has limited resources for tunnel filters,
12337  * so we can only reuse existing filters.
12338  *
12339  * In step 1 we define which Field Vector fields can be used for
12340  * filter types.
12341  * As we do not have the inner tag defined as a field,
12342  * we have to define it first, by reusing one of L1 entries.
12343  *
12344  * In step 2 we are replacing one of existing filter types with
12345  * a new one for QinQ.
12346  * As we reusing L1 and replacing L2, some of the default filter
12347  * types will disappear,which depends on L1 and L2 entries we reuse.
12348  *
12349  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
12350  *
12351  * 1.   Create L1 filter of outer vlan (12b) which will be in use
12352  *              later when we define the cloud filter.
12353  *      a.      Valid_flags.replace_cloud = 0
12354  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
12355  *      c.      New_filter = 0x10
12356  *      d.      TR bit = 0xff (optional, not used here)
12357  *      e.      Buffer – 2 entries:
12358  *              i.      Byte 0 = 8 (outer vlan FV index).
12359  *                      Byte 1 = 0 (rsv)
12360  *                      Byte 2-3 = 0x0fff
12361  *              ii.     Byte 0 = 37 (inner vlan FV index).
12362  *                      Byte 1 =0 (rsv)
12363  *                      Byte 2-3 = 0x0fff
12364  *
12365  * Step 2:
12366  * 2.   Create cloud filter using two L1 filters entries: stag and
12367  *              new filter(outer vlan+ inner vlan)
12368  *      a.      Valid_flags.replace_cloud = 1
12369  *      b.      Old_filter = 1 (instead of outer IP)
12370  *      c.      New_filter = 0x10
12371  *      d.      Buffer – 2 entries:
12372  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
12373  *                      Byte 1-3 = 0 (rsv)
12374  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
12375  *                      Byte 9-11 = 0 (rsv)
12376  */
static int
i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
{
	int ret = -ENOTSUP;
	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];

	/* Replacing cloud filters is a global operation on the device and
	 * cannot be done when multiple drivers share the NIC.
	 */
	if (pf->support_multi_driver) {
		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
		return ret;
	}

	/* Init: zeroing also clears valid_flags, so the first command is an
	 * L1 filter replace (replace_cloud flag not set), per step 1 above.
	 */
	memset(&filter_replace, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
	memset(&filter_replace_buf, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

	/* create L1 filter: repurpose the Stag_Inner_Vlan entry as the new
	 * outer-vlan + inner-vlan filter type 0x10.
	 */
	filter_replace.old_filter_type =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
	filter_replace.tr_bit = 0;

	/* Prepare the buffer, 2 entries: outer vlan FV field, then inner
	 * vlan FV field, each followed by a 12-bit field-vector mask.
	 */
	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
	filter_replace_buf.data[0] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	/* Field Vector 12b mask */
	filter_replace_buf.data[2] = 0xff;
	filter_replace_buf.data[3] = 0x0f;
	filter_replace_buf.data[4] =
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
	filter_replace_buf.data[4] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	/* Field Vector 12b mask */
	filter_replace_buf.data[6] = 0xff;
	filter_replace_buf.data[7] = 0x0f;
	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
			&filter_replace_buf);
	if (ret != I40E_SUCCESS)
		return ret;

	/* Firmware may report a different resulting type; warn so the
	 * operator knows a default L1 filter type was displaced.
	 */
	if (filter_replace.old_filter_type !=
	    filter_replace.new_filter_type)
		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
			    " original: 0x%x, new: 0x%x",
			    dev->device->name,
			    filter_replace.old_filter_type,
			    filter_replace.new_filter_type);

	/* Apply the second L2 cloud filter */
	memset(&filter_replace, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
	memset(&filter_replace_buf, 0,
	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

	/* create L2 filter, input for L2 filter will be L1 filter  */
	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;

	/* Prepare the buffer, 2 entries: stag input, then the L1 filter
	 * created in step 1, both marked valid.
	 */
	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
	filter_replace_buf.data[0] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
	filter_replace_buf.data[4] |=
		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
			&filter_replace_buf);
	if (!ret && (filter_replace.old_filter_type !=
		     filter_replace.new_filter_type))
		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
			    " original: 0x%x, new: 0x%x",
			    dev->device->name,
			    filter_replace.old_filter_type,
			    filter_replace.new_filter_type);

	return ret;
}
12460
/* Register driver log types; init/driver default to NOTICE, while the
 * datapath rx/tx log types exist only in debug builds.
 */
RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(i40e_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(i40e_logtype_tx, tx, DEBUG);
#endif
12469
12470 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12471                               ETH_I40E_FLOATING_VEB_ARG "=1"
12472                               ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
12473                               ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12474                               ETH_I40E_SUPPORT_MULTI_DRIVER "=1");