/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>
#include <rte_bitmap.h>
#include <rte_os_shim.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"
#include "i40e_hash.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"
#define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"
#define ETH_I40E_QUEUE_NUM_PER_VF_ARG   "queue-num-per-vf"
#define ETH_I40E_VF_MSG_CFG             "vf_msg_cfg"
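
/*
 * The keys above are the driver's runtime devargs. An illustrative EAL
 * option (the PCI address is hypothetical) enabling a floating VEB for
 * VFs 1 and 3-5 would be:
 *
 *     -a 0000:02:00.0,enable_floating_veb=1,floating_veb_list=1;3-5
 *
 * The parsing helpers further down define the exact accepted formats.
 */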

#define I40E_CLEAR_PXE_WAIT_MS     200
#define I40E_VSI_TSR_QINQ_STRIP         0x4010
#define I40E_VSI_TSR(_i)        (0x00050800 + ((_i) * 4))

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
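
/*
 * Note: 0xF2000 bytes == 968 * 1024 == I40E_RXPBSIZE, so both default
 * watermarks equal the full 968 KB Rx packet buffer once shifted into
 * kilobyte units (0xF2000 >> 10 == 968).
 */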

/* Receive Average Packet Size in Bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
                I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_GRST_MASK | \
                I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
                I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
                I40E_PFINT_ICR0_ENA_VFLR_MASK | \
                I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
        (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
        (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
        (1UL << RTE_ETH_FLOW_L2_PAYLOAD))

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL

/**
 * Below are values for writing un-exposed registers, as suggested
 * by silicon experts.
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0000FF00UL
#define I40E_INSET_IPV4_TTL_MASK        0x000000FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x0000FF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0000F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x0000FF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000000FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)

#define I40E_GLQF_PIT_IPV4_START        2
#define I40E_GLQF_PIT_IPV4_COUNT        2
#define I40E_GLQF_PIT_IPV6_START        4
#define I40E_GLQF_PIT_IPV6_COUNT        2

#define I40E_GLQF_PIT_SOURCE_OFF_GET(a) \
                                (((a) & I40E_GLQF_PIT_SOURCE_OFF_MASK) >> \
                                 I40E_GLQF_PIT_SOURCE_OFF_SHIFT)

#define I40E_GLQF_PIT_DEST_OFF_GET(a) \
                                (((a) & I40E_GLQF_PIT_DEST_OFF_MASK) >> \
                                 I40E_GLQF_PIT_DEST_OFF_SHIFT)

#define I40E_GLQF_PIT_FSIZE_GET(a)      (((a) & I40E_GLQF_PIT_FSIZE_MASK) >> \
                                         I40E_GLQF_PIT_FSIZE_SHIFT)

#define I40E_GLQF_PIT_BUILD(off, mask)  (((off) << 16) | (mask))
#define I40E_FDIR_FIELD_OFFSET(a)       ((a) >> 1)
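
/*
 * Layout note: I40E_GLQF_PIT_BUILD() is the inverse of the *_GET()
 * accessors above; it packs its 'off' argument into bits 31:16 and its
 * 'mask' argument into bits 15:0 of a GLQF_PIT register value.
 */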

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static int i40e_dev_stop(struct rte_eth_dev *dev);
static int i40e_dev_close(struct rte_eth_dev *dev);
static int i40e_dev_reset(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
                               struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
                                     struct rte_eth_xstat_name *xstats_names,
                                     unsigned limit);
static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
                                char *fw_version, size_t fw_size);
static int i40e_dev_info_get(struct rte_eth_dev *dev,
                             struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id,
                                int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
                              enum rte_vlan_type vlan_type,
                              uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
                                      uint16_t queue,
                                      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                                       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
                            struct rte_ether_addr *mac_addr,
                            uint32_t index,
                            uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
                bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
                               uint32_t hireg,
                               uint32_t loreg,
                               bool offset_loaded,
                               uint64_t *offset,
                               uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static void i40e_dev_alarm_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
                                uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
                        uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
                        uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
                                                struct i40e_vsi *vsi);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
                                 const struct rte_flow_ops **ops);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                                  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
                                                     uint16_t seid,
                                                     uint16_t rule_type,
                                                     uint16_t *entries,
                                                     uint16_t count,
                                                     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp,
                                           uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
                                    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                         uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
                         struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
                           struct rte_dev_eeprom_info *eeprom);

static int i40e_get_module_info(struct rte_eth_dev *dev,
                                struct rte_eth_dev_module_info *modinfo);
static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
                                  struct rte_dev_eeprom_info *info);

static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct rte_ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
        const struct rte_eth_ethertype_filter *input,
        struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
                                   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
        struct i40e_aqc_cloud_filters_element_bb *cld_filter,
        struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
                                struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

static const char *const valid_keys[] = {
        ETH_I40E_FLOATING_VEB_ARG,
        ETH_I40E_FLOATING_VEB_LIST_ARG,
        ETH_I40E_SUPPORT_MULTI_DRIVER,
        ETH_I40E_QUEUE_NUM_PER_VF_ARG,
        ETH_I40E_VF_MSG_CFG,
        NULL};

static const struct rte_pci_id pci_id_i40e_map[] = {
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
        .dev_configure                = i40e_dev_configure,
        .dev_start                    = i40e_dev_start,
        .dev_stop                     = i40e_dev_stop,
        .dev_close                    = i40e_dev_close,
        .dev_reset                    = i40e_dev_reset,
        .promiscuous_enable           = i40e_dev_promiscuous_enable,
        .promiscuous_disable          = i40e_dev_promiscuous_disable,
        .allmulticast_enable          = i40e_dev_allmulticast_enable,
        .allmulticast_disable         = i40e_dev_allmulticast_disable,
        .dev_set_link_up              = i40e_dev_set_link_up,
        .dev_set_link_down            = i40e_dev_set_link_down,
        .link_update                  = i40e_dev_link_update,
        .stats_get                    = i40e_dev_stats_get,
        .xstats_get                   = i40e_dev_xstats_get,
        .xstats_get_names             = i40e_dev_xstats_get_names,
        .stats_reset                  = i40e_dev_stats_reset,
        .xstats_reset                 = i40e_dev_stats_reset,
        .fw_version_get               = i40e_fw_version_get,
        .dev_infos_get                = i40e_dev_info_get,
        .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
        .vlan_tpid_set                = i40e_vlan_tpid_set,
        .vlan_offload_set             = i40e_vlan_offload_set,
        .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
        .vlan_pvid_set                = i40e_vlan_pvid_set,
        .rx_queue_start               = i40e_dev_rx_queue_start,
        .rx_queue_stop                = i40e_dev_rx_queue_stop,
        .tx_queue_start               = i40e_dev_tx_queue_start,
        .tx_queue_stop                = i40e_dev_tx_queue_stop,
        .rx_queue_setup               = i40e_dev_rx_queue_setup,
        .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
        .rx_queue_release             = i40e_dev_rx_queue_release,
        .tx_queue_setup               = i40e_dev_tx_queue_setup,
        .tx_queue_release             = i40e_dev_tx_queue_release,
        .dev_led_on                   = i40e_dev_led_on,
        .dev_led_off                  = i40e_dev_led_off,
        .flow_ctrl_get                = i40e_flow_ctrl_get,
        .flow_ctrl_set                = i40e_flow_ctrl_set,
        .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
        .mac_addr_add                 = i40e_macaddr_add,
        .mac_addr_remove              = i40e_macaddr_remove,
        .reta_update                  = i40e_dev_rss_reta_update,
        .reta_query                   = i40e_dev_rss_reta_query,
        .rss_hash_update              = i40e_dev_rss_hash_update,
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
        .flow_ops_get                 = i40e_dev_flow_ops_get,
        .rxq_info_get                 = i40e_rxq_info_get,
        .txq_info_get                 = i40e_txq_info_get,
        .rx_burst_mode_get            = i40e_rx_burst_mode_get,
        .tx_burst_mode_get            = i40e_tx_burst_mode_get,
        .mirror_rule_set              = i40e_mirror_rule_set,
        .mirror_rule_reset            = i40e_mirror_rule_reset,
        .timesync_enable              = i40e_timesync_enable,
        .timesync_disable             = i40e_timesync_disable,
        .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
        .get_dcb_info                 = i40e_dev_get_dcb_info,
        .timesync_adjust_time         = i40e_timesync_adjust_time,
        .timesync_read_time           = i40e_timesync_read_time,
        .timesync_write_time          = i40e_timesync_write_time,
        .get_reg                      = i40e_get_regs,
        .get_eeprom_length            = i40e_get_eeprom_length,
        .get_eeprom                   = i40e_get_eeprom,
        .get_module_info              = i40e_get_module_info,
        .get_module_eeprom            = i40e_get_module_eeprom,
        .mac_addr_set                 = i40e_set_default_mac_addr,
        .mtu_set                      = i40e_dev_mtu_set,
        .tm_ops_get                   = i40e_tm_ops_get,
        .tx_done_cleanup              = i40e_tx_done_cleanup,
        .get_monitor_addr             = i40e_get_monitor_addr,
};

/* store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
        {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
        {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
        {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
        {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
        {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
                rx_unknown_protocol)},
        {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
        {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
        {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
        {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
                sizeof(rte_i40e_stats_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
        {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
                tx_dropped_link_down)},
        {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
        {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
                illegal_bytes)},
        {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
        {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
                mac_local_faults)},
        {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
                mac_remote_faults)},
        {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
                rx_length_errors)},
        {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
        {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
        {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
        {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
        {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
        {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_127)},
        {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_255)},
        {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_511)},
        {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1023)},
        {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1522)},
        {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_big)},
        {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
                rx_undersize)},
        {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
                rx_oversize)},
        {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
                mac_short_packet_dropped)},
        {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
                rx_fragments)},
        {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
        {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
        {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_127)},
        {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_255)},
        {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_511)},
        {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1023)},
        {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1522)},
        {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_big)},
        {"rx_flow_director_atr_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_atr_match)},
        {"rx_flow_director_sb_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_sb_match)},
        {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                tx_lpi_status)},
        {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                rx_lpi_status)},
        {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                tx_lpi_count)},
        {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
                sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_rx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
                sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_tx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_tx)},
        {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
                sizeof(rte_i40e_txq_prio_strings[0]))

static int
eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        char name[RTE_ETH_NAME_MAX_LEN];
        struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
        int i, retval;

        if (pci_dev->device.devargs) {
                retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
                                &eth_da);
                if (retval)
                        return retval;
        }

        if (eth_da.nb_representor_ports > 0 &&
            eth_da.type != RTE_ETH_REPRESENTOR_VF) {
                PMD_DRV_LOG(ERR, "unsupported representor type: %s",
                            pci_dev->device.devargs->args);
                return -ENOTSUP;
        }

        retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                sizeof(struct i40e_adapter),
                eth_dev_pci_specific_init, pci_dev,
                eth_i40e_dev_init, NULL);

        if (retval || eth_da.nb_representor_ports < 1)
                return retval;

        /* probe VF representor ports */
        struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
                pci_dev->device.name);

        if (pf_ethdev == NULL)
                return -ENODEV;

        for (i = 0; i < eth_da.nb_representor_ports; i++) {
                struct i40e_vf_representor representor = {
                        .vf_id = eth_da.representor_ports[i],
                        .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
                                pf_ethdev->data->dev_private)->switch_domain_id,
                        .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
                                pf_ethdev->data->dev_private)
                };

                /* representor port name: net_<pci bdf>_representor_<vf id> */
                snprintf(name, sizeof(name), "net_%s_representor_%d",
                        pci_dev->device.name, eth_da.representor_ports[i]);

                retval = rte_eth_dev_create(&pci_dev->device, name,
                        sizeof(struct i40e_vf_representor), NULL, NULL,
                        i40e_vf_representor_init, &representor);

                if (retval)
                        PMD_DRV_LOG(ERR,
                                "failed to create i40e vf representor %s.",
                                name);
        }

        return 0;
}
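
/*
 * Illustrative usage (PCI address hypothetical): probing with
 *
 *     -a 0000:02:00.0,representor=[0-2]
 *
 * creates the PF port plus VF representor ports named
 * net_0000:02:00.0_representor_0 .. _2, matching the snprintf() above.
 */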

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!ethdev)
                return 0;

        if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
                return rte_eth_dev_pci_generic_remove(pci_dev,
                                        i40e_vf_representor_uninit);
        else
                return rte_eth_dev_pci_generic_remove(pci_dev,
                                                eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
        .id_table = pci_id_i40e_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_i40e_pci_probe,
        .remove = eth_i40e_pci_remove,
};

static inline void
i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
                         uint32_t reg_val)
{
        uint32_t ori_reg_val;
        struct rte_eth_dev *dev;

        ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
        dev = ((struct i40e_adapter *)hw->back)->eth_dev;
        i40e_write_rx_ctl(hw, reg_addr, reg_val);
        if (ori_reg_val != reg_val)
                PMD_DRV_LOG(WARNING,
                            "i40e device %s changed global register [0x%08x]."
                            " original: 0x%08x, new: 0x%08x",
                            dev->device->name, reg_addr, ori_reg_val, reg_val);
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
        /*
         * Initialize registers for parsing the packet type of QinQ.
         * This should be removed from the code once a proper
         * configuration API is added, to avoid configuration conflicts
         * between ports of the same device.
         */
        I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
        I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}

static inline void i40e_config_automask(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t val;

        /* INTENA flag is not auto-cleared on interrupt */
        val = I40E_READ_REG(hw, I40E_GLINT_CTL);
        val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
                I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;

        /* If multi-driver support is enabled, the PF will use INT0. */
        if (!pf->support_multi_driver)
                val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

        I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

static inline void i40e_clear_automask(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t val;

        val = I40E_READ_REG(hw, I40E_GLINT_CTL);
        val &= ~(I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
                 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK);

        if (!pf->support_multi_driver)
                val &= ~I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

        I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
        int ret;

        ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
                                I40E_FLOW_CONTROL_ETHERTYPE, flags,
                                pf->main_vsi_seid, 0,
                                TRUE, NULL, NULL);
        if (ret)
                PMD_INIT_LOG(ERR,
                        "Failed to add filter to drop flow control frames from VSIs.");
}

static int
floating_veb_list_handler(__rte_unused const char *key,
                          const char *floating_veb_value,
                          void *opaque)
{
        int idx = 0;
        unsigned int count = 0;
        char *end = NULL;
        int min, max;
        bool *vf_floating_veb = opaque;

        while (isblank(*floating_veb_value))
                floating_veb_value++;

        /* Reset floating VEB configuration for VFs */
        for (idx = 0; idx < I40E_MAX_VF; idx++)
                vf_floating_veb[idx] = false;

        min = I40E_MAX_VF;
        do {
                while (isblank(*floating_veb_value))
                        floating_veb_value++;
                if (*floating_veb_value == '\0')
                        return -1;
                errno = 0;
                idx = strtoul(floating_veb_value, &end, 10);
                if (errno || end == NULL)
                        return -1;
                while (isblank(*end))
                        end++;
                if (*end == '-') {
                        min = idx;
                } else if ((*end == ';') || (*end == '\0')) {
                        max = idx;
                        if (min == I40E_MAX_VF)
                                min = idx;
                        if (max >= I40E_MAX_VF)
                                max = I40E_MAX_VF - 1;
                        for (idx = min; idx <= max; idx++) {
                                vf_floating_veb[idx] = true;
                                count++;
                        }
                        min = I40E_MAX_VF;
                } else {
                        return -1;
                }
                floating_veb_value = end + 1;
        } while (*end != '\0');

        if (count == 0)
                return -1;

        return 0;
}
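
/*
 * Accepted floating_veb_list syntax, as parsed above: semicolon-separated
 * VF indices or inclusive ranges, e.g. "1;3-5" marks VFs 1, 3, 4 and 5
 * for attachment to the floating VEB.
 */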

static void
config_vf_floating_veb(struct rte_devargs *devargs,
                       uint16_t floating_veb,
                       bool *vf_floating_veb)
{
        struct rte_kvargs *kvlist;
        int i;
        const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

        if (!floating_veb)
                return;
        /* All the VFs attach to the floating VEB by default
         * when the floating VEB is enabled.
         */
        for (i = 0; i < I40E_MAX_VF; i++)
                vf_floating_veb[i] = true;

        if (devargs == NULL)
                return;

        kvlist = rte_kvargs_parse(devargs->args, valid_keys);
        if (kvlist == NULL)
                return;

        if (!rte_kvargs_count(kvlist, floating_veb_list)) {
                rte_kvargs_free(kvlist);
                return;
        }
        /* When the floating_veb_list parameter exists, all the VFs
         * attach to the legacy VEB first, and are then moved to the
         * floating VEB according to the floating_veb_list.
         */
        if (rte_kvargs_process(kvlist, floating_veb_list,
                               floating_veb_list_handler,
                               vf_floating_veb) < 0) {
                rte_kvargs_free(kvlist);
                return;
        }
        rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
                            const char *value,
                            __rte_unused void *opaque)
{
        if (strcmp(value, "1"))
                return -1;

        return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, valid_keys);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, floating_veb_key)) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        /* Floating VEB is enabled when there's the key-value pair:
         * enable_floating_veb=1
         */
        if (rte_kvargs_process(kvlist, floating_veb_key,
                               i40e_check_floating_handler, NULL) < 0) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        rte_kvargs_free(kvlist);

        return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

        if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
                pf->floating_veb =
                        is_floating_veb_supported(pci_dev->device.devargs);
                config_vf_floating_veb(pci_dev->device.devargs,
                                       pf->floating_veb,
                                       pf->floating_veb_list);
        } else {
                pf->floating_veb = false;
        }
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
        char ethertype_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters ethertype_hash_params = {
                .name = ethertype_hash_name,
                .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
                .key_len = sizeof(struct i40e_ethertype_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize ethertype filter rule list and hash */
        TAILQ_INIT(&ethertype_rule->ethertype_list);
        snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
                 "ethertype_%s", dev->device->name);
        ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
        if (!ethertype_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
                return -EINVAL;
        }
        ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
                                       sizeof(struct i40e_ethertype_filter *) *
                                       I40E_MAX_ETHERTYPE_FILTER_NUM,
                                       0);
        if (!ethertype_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for ethertype hash map!");
                ret = -ENOMEM;
                goto err_ethertype_hash_map_alloc;
        }

        return 0;

err_ethertype_hash_map_alloc:
        rte_hash_free(ethertype_rule->hash_table);

        return ret;
}
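
/*
 * A sketch of how entries later land in this table (the actual insert
 * helper, i40e_sw_ethertype_filter_insert(), is declared above and
 * defined later in this file):
 *
 *     ret = rte_hash_add_key(ethertype_rule->hash_table, &filter->input);
 *     if (ret >= 0)
 *             ethertype_rule->hash_map[ret] = filter;
 *
 * rte_hash_add_key() returns the key's slot index, which is why hash_map
 * holds I40E_MAX_ETHERTYPE_FILTER_NUM pointers.
 */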

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
        char tunnel_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters tunnel_hash_params = {
                .name = tunnel_hash_name,
                .entries = I40E_MAX_TUNNEL_FILTER_NUM,
                .key_len = sizeof(struct i40e_tunnel_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize tunnel filter rule list and hash */
        TAILQ_INIT(&tunnel_rule->tunnel_list);
        snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
                 "tunnel_%s", dev->device->name);
        tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
        if (!tunnel_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
                return -EINVAL;
        }
        tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
                                    sizeof(struct i40e_tunnel_filter *) *
                                    I40E_MAX_TUNNEL_FILTER_NUM,
                                    0);
        if (!tunnel_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for tunnel hash map!");
                ret = -ENOMEM;
                goto err_tunnel_hash_map_alloc;
        }

        return 0;

err_tunnel_hash_map_alloc:
        rte_hash_free(tunnel_rule->hash_table);

        return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
        uint32_t best = hw->func_caps.fd_filters_best_effort;
        struct rte_bitmap *bmp = NULL;
        uint32_t bmp_size;
        void *mem = NULL;
        uint32_t i = 0;
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = I40E_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct i40e_fdir_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize flow director filter rule list and hash */
        TAILQ_INIT(&fdir_info->fdir_list);
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }

        fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
                                          sizeof(struct i40e_fdir_filter *) *
                                          I40E_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }

        fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
                        sizeof(struct i40e_fdir_filter) *
                        I40E_MAX_FDIR_FILTER_NUM,
                        0);

        if (!fdir_info->fdir_filter_array) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir filter array!");
                ret = -ENOMEM;
                goto err_fdir_filter_array_alloc;
        }

        fdir_info->fdir_space_size = alloc + best;
        fdir_info->fdir_actual_cnt = 0;
        fdir_info->fdir_guarantee_total_space = alloc;
        fdir_info->fdir_guarantee_free_space =
                fdir_info->fdir_guarantee_total_space;

        PMD_DRV_LOG(INFO, "FDIR guaranteed space: %u, best-effort space: %u.",
                    alloc, best);

        fdir_info->fdir_flow_pool.pool =
                        rte_zmalloc("i40e_fdir_entry",
                                sizeof(struct i40e_fdir_entry) *
                                fdir_info->fdir_space_size,
                                0);

        if (!fdir_info->fdir_flow_pool.pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for the fdir flow pool!");
                ret = -ENOMEM;
                goto err_fdir_bitmap_flow_alloc;
        }

        for (i = 0; i < fdir_info->fdir_space_size; i++)
                fdir_info->fdir_flow_pool.pool[i].idx = i;

        bmp_size =
                rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
        mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
        if (mem == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir bitmap!");
                ret = -ENOMEM;
                goto err_fdir_mem_alloc;
        }
        bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
        if (bmp == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to initialize fdir bitmap!");
                ret = -ENOMEM;
                goto err_fdir_bmp_alloc;
        }
        for (i = 0; i < fdir_info->fdir_space_size; i++)
                rte_bitmap_set(bmp, i);

        fdir_info->fdir_flow_pool.bitmap = bmp;

        return 0;

err_fdir_bmp_alloc:
        rte_free(mem);
err_fdir_mem_alloc:
        rte_free(fdir_info->fdir_flow_pool.pool);
err_fdir_bitmap_flow_alloc:
        rte_free(fdir_info->fdir_filter_array);
err_fdir_filter_array_alloc:
        rte_free(fdir_info->hash_map);
err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}
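
/*
 * Illustrative only: pulling a free flow entry from the pool built above
 * amounts to scanning the bitmap for a set bit, claiming it, and indexing
 * the pre-populated array, roughly:
 *
 *     uint32_t pos = 0;
 *     uint64_t slab = 0;
 *
 *     if (rte_bitmap_scan(bmp, &pos, &slab)) {
 *             pos += rte_bsf64(slab);   (first set bit in the slab)
 *             rte_bitmap_clear(bmp, pos);
 *             entry = &fdir_info->fdir_flow_pool.pool[pos];
 *     }
 *
 * The PMD's real allocator lives elsewhere in this file; every bit starts
 * out set here simply because all entries begin free.
 */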
1196
1197 static void
1198 i40e_init_customized_info(struct i40e_pf *pf)
1199 {
1200         int i;
1201
1202         /* Initialize customized pctype */
1203         for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
1204                 pf->customized_pctype[i].index = i;
1205                 pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
1206                 pf->customized_pctype[i].valid = false;
1207         }
1208
1209         pf->gtp_support = false;
1210         pf->esp_support = false;
1211 }
1212
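/*
 * Decide which flow director filters are invalidated first when the
 * table is full: guaranteed-space or shared (best-effort) filters.
 * Without multi-driver support the PMD owns I40E_GLQF_CTL and forces
 * "guaranteed first"; otherwise it only reads the policy that another
 * driver has already programmed.
 */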
1213 static void
1214 i40e_init_filter_invalidation(struct i40e_pf *pf)
1215 {
1216         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1217         struct i40e_fdir_info *fdir_info = &pf->fdir;
1218         uint32_t glqf_ctl_reg = 0;
1219
1220         glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
1221         if (!pf->support_multi_driver) {
1222                 fdir_info->fdir_invalprio = 1;
1223                 glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
1224                 PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
1225                 i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
1226         } else {
1227                 if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
1228                         fdir_info->fdir_invalprio = 1;
1229                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
1230                 } else {
1231                         fdir_info->fdir_invalprio = 0;
1232                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
1233                 }
1234         }
1235 }
1236
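/*
 * Reset the queue-region configuration: clear every PFQF_HREGION
 * register and wipe the software copy, so no RSS queue regions remain
 * configured.
 */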
1237 void
1238 i40e_init_queue_region_conf(struct rte_eth_dev *dev)
1239 {
1240         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1241         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1242         struct i40e_queue_regions *info = &pf->queue_region;
1243         uint16_t i;
1244
1245         for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
1246                 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
1247
1248         memset(info, 0, sizeof(struct i40e_queue_regions));
1249 }
1250
1251 static int
1252 i40e_parse_multi_drv_handler(__rte_unused const char *key,
1253                                const char *value,
1254                                void *opaque)
1255 {
1256         struct i40e_pf *pf;
1257         unsigned long support_multi_driver;
1258         char *end;
1259
1260         pf = (struct i40e_pf *)opaque;
1261
1262         errno = 0;
1263         support_multi_driver = strtoul(value, &end, 10);
1264         if (errno != 0 || end == value || *end != 0) {
1265                 PMD_DRV_LOG(WARNING, "Wrong global configuration");
1266                 return -(EINVAL);
1267         }
1268
1269         if (support_multi_driver == 1 || support_multi_driver == 0)
1270                 pf->support_multi_driver = (bool)support_multi_driver;
1271         else
1272                 PMD_DRV_LOG(WARNING,
1273                             "%s must be 1 or 0, enable global configuration by default.",
1274                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1275         return 0;
1276 }
1277
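/*
 * Parse the "support-multi-driver" devarg. Illustrative usage (the PCI
 * address is a placeholder):
 *     dpdk-testpmd -a 0000:02:00.0,support-multi-driver=1 -- -i
 * When enabled, the PMD avoids touching global registers that may be
 * shared with another driver on the same NIC, as the various
 * !pf->support_multi_driver branches below show.
 */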
1278 static int
1279 i40e_support_multi_driver(struct rte_eth_dev *dev)
1280 {
1281         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1282         struct rte_kvargs *kvlist;
1283         int kvargs_count;
1284
1285         /* Enable global configuration by default */
1286         pf->support_multi_driver = false;
1287
1288         if (!dev->device->devargs)
1289                 return 0;
1290
1291         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1292         if (!kvlist)
1293                 return -EINVAL;
1294
1295         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1296         if (!kvargs_count) {
1297                 rte_kvargs_free(kvlist);
1298                 return 0;
1299         }
1300
1301         if (kvargs_count > 1)
1302                 PMD_DRV_LOG(WARNING, "More than one \"%s\" argument given; "
1303                             "only the first invalid or the last valid one is used!",
1304                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1305
1306         if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1307                                i40e_parse_multi_drv_handler, pf) < 0) {
1308                 rte_kvargs_free(kvlist);
1309                 return -EINVAL;
1310         }
1311
1312         rte_kvargs_free(kvlist);
1313         return 0;
1314 }
1315
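/*
 * Write a global register through the debug AdminQ command. The
 * original value is read back first so that any change to a register
 * shared across PFs/drivers is logged before being overwritten.
 */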
1316 static int
1317 i40e_aq_debug_write_global_register(struct i40e_hw *hw,
1318                                     uint32_t reg_addr, uint64_t reg_val,
1319                                     struct i40e_asq_cmd_details *cmd_details)
1320 {
1321         uint64_t ori_reg_val;
1322         struct rte_eth_dev *dev;
1323         int ret;
1324
1325         ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
1326         if (ret != I40E_SUCCESS) {
1327                 PMD_DRV_LOG(ERR,
1328                             "Failed to debug read register 0x%08x",
1329                             reg_addr);
1330                 return -EIO;
1331         }
1332         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
1333
1334         if (ori_reg_val != reg_val)
1335                 PMD_DRV_LOG(WARNING,
1336                             "i40e device %s changed global register [0x%08x]."
1337                             " original: 0x%"PRIx64", after: 0x%"PRIx64,
1338                             dev->device->name, reg_addr, ori_reg_val, reg_val);
1339
1340         return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
1341 }
1342
1343 static int
1344 read_vf_msg_config(__rte_unused const char *key,
1345                                const char *value,
1346                                void *opaque)
1347 {
1348         struct i40e_vf_msg_cfg *cfg = opaque;
1349
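        /* Devarg format: "<max_msg>@<period>:<ignore_second>", e.g.
         * vf_msg_cfg=60@120:180. Read together with the checks below:
         * at most max_msg VF mailbox messages per period seconds, and a
         * misbehaving VF is ignored for ignore_second seconds; the
         * enforcement itself lives in the PF host code.
         */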
1350         if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
1351                         &cfg->ignore_second) != 3) {
1352                 memset(cfg, 0, sizeof(*cfg));
1353                 PMD_DRV_LOG(ERR, "format error! example: "
1354                                 "%s=60@120:180", ETH_I40E_VF_MSG_CFG);
1355                 return -EINVAL;
1356         }
1357
1358         /*
1359          * If the message validation function has been enabled, both
1360          * 'period' and 'ignore_second' must be greater than 0.
1361          */
1362         if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
1363                 memset(cfg, 0, sizeof(*cfg));
1364                 PMD_DRV_LOG(ERR, "%s error! The second and third"
1365                                 " numbers must be greater than 0!",
1366                                 ETH_I40E_VF_MSG_CFG);
1367                 return -EINVAL;
1368         }
1369
1370         return 0;
1371 }
1372
1373 static int
1374 i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
1375                 struct i40e_vf_msg_cfg *msg_cfg)
1376 {
1377         struct rte_kvargs *kvlist;
1378         int kvargs_count;
1379         int ret = 0;
1380
1381         memset(msg_cfg, 0, sizeof(*msg_cfg));
1382
1383         if (!dev->device->devargs)
1384                 return ret;
1385
1386         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1387         if (!kvlist)
1388                 return -EINVAL;
1389
1390         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
1391         if (!kvargs_count)
1392                 goto free_end;
1393
1394         if (kvargs_count > 1) {
1395                 PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
1396                                 ETH_I40E_VF_MSG_CFG);
1397                 ret = -EINVAL;
1398                 goto free_end;
1399         }
1400
1401         if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
1402                         read_vf_msg_config, msg_cfg) < 0)
1403                 ret = -EINVAL;
1404
1405 free_end:
1406         rte_kvargs_free(kvlist);
1407         return ret;
1408 }
1409
1410 #define I40E_ALARM_INTERVAL 50000 /* us */
1411
1412 static int
1413 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1414 {
1415         struct rte_pci_device *pci_dev;
1416         struct rte_intr_handle *intr_handle;
1417         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1418         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1419         struct i40e_vsi *vsi;
1420         int ret;
1421         uint32_t len, val;
1422         uint8_t aq_fail = 0;
1423
1424         PMD_INIT_FUNC_TRACE();
1425
1426         dev->dev_ops = &i40e_eth_dev_ops;
1427         dev->rx_queue_count = i40e_dev_rx_queue_count;
1428         dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
1429         dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
1430         dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
1431         dev->rx_pkt_burst = i40e_recv_pkts;
1432         dev->tx_pkt_burst = i40e_xmit_pkts;
1433         dev->tx_pkt_prepare = i40e_prep_pkts;
1434
1435         /* For secondary processes, we don't initialise any further, as the
1436          * primary has already done this work; only check whether we need a
1437          * different RX/TX function. */
1438         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1439                 i40e_set_rx_function(dev);
1440                 i40e_set_tx_function(dev);
1441                 return 0;
1442         }
1443         i40e_set_default_ptype_table(dev);
1444         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1445         intr_handle = &pci_dev->intr_handle;
1446
1447         rte_eth_copy_pci_info(dev, pci_dev);
1448         dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1449
1450         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1451         pf->adapter->eth_dev = dev;
1452         pf->dev_data = dev->data;
1453
1454         hw->back = I40E_PF_TO_ADAPTER(pf);
1455         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1456         if (!hw->hw_addr) {
1457                 PMD_INIT_LOG(ERR,
1458                         "Hardware is not available, as address is NULL");
1459                 return -ENODEV;
1460         }
1461
1462         hw->vendor_id = pci_dev->id.vendor_id;
1463         hw->device_id = pci_dev->id.device_id;
1464         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1465         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1466         hw->bus.device = pci_dev->addr.devid;
1467         hw->bus.func = pci_dev->addr.function;
1468         hw->adapter_stopped = 0;
1469         hw->adapter_closed = 0;
1470
1471         /* Init switch device pointer */
1472         hw->switch_dev = NULL;
1473
1474         /*
1475          * Switch Tag value should not be identical to either the First Tag
1476          * or Second Tag values. So set something other than common Ethertype
1477          * for internal switching.
1478          */
1479         hw->switch_tag = 0xffff;
1480
1481         val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1482         if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1483                 PMD_INIT_LOG(ERR, "\nERROR: "
1484                         "Firmware recovery mode detected. Limiting functionality.\n"
1485                         "Refer to the Intel(R) Ethernet Adapters and Devices "
1486                         "User Guide for details on firmware recovery mode.");
1487                 return -EIO;
1488         }
1489
1490         i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
1491         /* Check whether multi-driver support is needed */
1492         i40e_support_multi_driver(dev);
1493
1494         /* Make sure all is clean before doing PF reset */
1495         i40e_clear_hw(hw);
1496
1497         /* Reset here to make sure all is clean for each PF */
1498         ret = i40e_pf_reset(hw);
1499         if (ret) {
1500                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1501                 return ret;
1502         }
1503
1504         /* Initialize the shared code (base driver) */
1505         ret = i40e_init_shared_code(hw);
1506         if (ret) {
1507                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1508                 return ret;
1509         }
1510
1511         /* Initialize the parameters for adminq */
1512         i40e_init_adminq_parameter(hw);
1513         ret = i40e_init_adminq(hw);
1514         if (ret != I40E_SUCCESS) {
1515                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1516                 return -EIO;
1517         }
1518         /* The firmware of SFP X722 does not support 802.1ad frames */
1519         if (hw->device_id == I40E_DEV_ID_SFP_X722 ||
1520                 hw->device_id == I40E_DEV_ID_SFP_I_X722)
1521                 hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
1522
1523         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1524                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1525                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1526                      ((hw->nvm.version >> 12) & 0xf),
1527                      ((hw->nvm.version >> 4) & 0xff),
1528                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1529
1530         /* Initialize the hardware */
1531         i40e_hw_init(dev);
1532
1533         i40e_config_automask(pf);
1534
1535         i40e_set_default_pctype_table(dev);
1536
1537         /*
1538          * To work around the NVM issue, initialize registers
1539          * for packet type of QinQ by software.
1540          * It should be removed once issues are fixed in NVM.
1541          */
1542         if (!pf->support_multi_driver)
1543                 i40e_GLQF_reg_init(hw);
1544
1545         /* Initialize the input set for filters (hash and fd) to default value */
1546         i40e_filter_input_set_init(pf);
1547
1548         /* initialise the L3_MAP register */
1549         if (!pf->support_multi_driver) {
1550                 ret = i40e_aq_debug_write_global_register(hw,
1551                                                    I40E_GLQF_L3_MAP(40),
1552                                                    0x00000028,  NULL);
1553                 if (ret)
1554                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1555                                      ret);
1556                 PMD_INIT_LOG(DEBUG,
1557                              "Global register 0x%08x is set to 0x28",
1558                              I40E_GLQF_L3_MAP(40));
1559         }
1560
1561         /* Need the special FW version to support floating VEB */
1562         config_floating_veb(dev);
1563         /* Clear PXE mode */
1564         i40e_clear_pxe_mode(hw);
1565         i40e_dev_sync_phy_type(hw);
1566
1567         /*
1568          * On X710, performance number is far from the expectation on recent
1569          * firmware versions. The fix for this issue may not be integrated in
1570          * the following firmware version. So the workaround in software driver
1571          * is needed. It needs to modify the initial values of 3 internal only
1572          * registers. Note that the workaround can be removed when it is fixed
1573          * in firmware in the future.
1574          */
1575         i40e_configure_registers(hw);
1576
1577         /* Get hw capabilities */
1578         ret = i40e_get_cap(hw);
1579         if (ret != I40E_SUCCESS) {
1580                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1581                 goto err_get_capabilities;
1582         }
1583
1584         /* Initialize parameters for PF */
1585         ret = i40e_pf_parameter_init(dev);
1586         if (ret != 0) {
1587                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1588                 goto err_parameter_init;
1589         }
1590
1591         /* Initialize the queue management */
1592         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1593         if (ret < 0) {
1594                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1595                 goto err_qp_pool_init;
1596         }
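        /* MSIX vector 0 (I40E_MISC_VEC_ID) is reserved for the misc and
         * adminq interrupt, so the allocatable vector pool starts at 1.
         */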
1597         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1598                                 hw->func_caps.num_msix_vectors - 1);
1599         if (ret < 0) {
1600                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1601                 goto err_msix_pool_init;
1602         }
1603
1604         /* Initialize lan hmc */
1605         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1606                                 hw->func_caps.num_rx_qp, 0, 0);
1607         if (ret != I40E_SUCCESS) {
1608                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1609                 goto err_init_lan_hmc;
1610         }
1611
1612         /* Configure lan hmc */
1613         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1614         if (ret != I40E_SUCCESS) {
1615                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1616                 goto err_configure_lan_hmc;
1617         }
1618
1619         /* Get and check the mac address */
1620         i40e_get_mac_addr(hw, hw->mac.addr);
1621         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1622                 PMD_INIT_LOG(ERR, "MAC address is not valid");
1623                 ret = -EIO;
1624                 goto err_get_mac_addr;
1625         }
1626         /* Copy the permanent MAC address */
1627         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1628                         (struct rte_ether_addr *)hw->mac.perm_addr);
1629
1630         /* Disable flow control */
1631         hw->fc.requested_mode = I40E_FC_NONE;
1632         i40e_set_fc(hw, &aq_fail, TRUE);
1633
1634         /* Set the global registers with default ether type value */
1635         if (!pf->support_multi_driver) {
1636                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1637                                          RTE_ETHER_TYPE_VLAN);
1638                 if (ret != I40E_SUCCESS) {
1639                         PMD_INIT_LOG(ERR,
1640                                      "Failed to set the default outer "
1641                                      "VLAN ether type");
1642                         goto err_setup_pf_switch;
1643                 }
1644         }
1645
1646         /* PF setup, which includes VSI setup */
1647         ret = i40e_pf_setup(pf);
1648         if (ret) {
1649                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1650                 goto err_setup_pf_switch;
1651         }
1652
1653         vsi = pf->main_vsi;
1654
1655         /* Disable double vlan by default */
1656         i40e_vsi_config_double_vlan(vsi, FALSE);
1657
1658         /* Disable S-TAG identification when floating_veb is disabled */
1659         if (!pf->floating_veb) {
1660                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1661                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1662                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1663                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1664                 }
1665         }
1666
1667         if (!vsi->max_macaddrs)
1668                 len = RTE_ETHER_ADDR_LEN;
1669         else
1670                 len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1671
1672         /* Must be done after the VSI is initialized */
1673         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1674         if (!dev->data->mac_addrs) {
1675                 PMD_INIT_LOG(ERR,
1676                         "Failed to allocate memory for storing MAC address");
                ret = -ENOMEM;
1677                 goto err_mac_alloc;
1678         }
1679         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1680                                         &dev->data->mac_addrs[0]);
1681
1682         /* Init dcb to sw mode by default */
1683         ret = i40e_dcb_init_configure(dev, TRUE);
1684         if (ret != I40E_SUCCESS) {
1685                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1686                 pf->flags &= ~I40E_FLAG_DCB;
1687         }
1688         /* Update HW struct after DCB configuration */
1689         i40e_get_cap(hw);
1690
1691         /* initialize pf host driver to setup SRIOV resource if applicable */
1692         i40e_pf_host_init(dev);
1693
1694         /* register callback func to eal lib */
1695         rte_intr_callback_register(intr_handle,
1696                                    i40e_dev_interrupt_handler, dev);
1697
1698         /* configure and enable device interrupt */
1699         i40e_pf_config_irq0(hw, TRUE);
1700         i40e_pf_enable_irq0(hw);
1701
1702         /* enable uio intr after callback register */
1703         rte_intr_enable(intr_handle);
1704
1705         /* By default disable flexible payload in global configuration */
1706         if (!pf->support_multi_driver)
1707                 i40e_flex_payload_reg_set_default(hw);
1708
1709         /*
1710          * Add an ethertype filter to drop all flow control frames transmitted
1711          * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1712          * frames to wire.
1713          */
1714         i40e_add_tx_flow_control_drop_filter(pf);
1715
1716         /* Set the max frame size to 0x2600 by default,
1717          * in case other drivers changed the default value.
1718          */
1719         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
1720
1721         /* initialize mirror rule list */
1722         TAILQ_INIT(&pf->mirror_list);
1723
1724         /* initialize RSS rule list */
1725         TAILQ_INIT(&pf->rss_config_list);
1726
1727         /* initialize Traffic Manager configuration */
1728         i40e_tm_conf_init(dev);
1729
1730         /* Initialize customized information */
1731         i40e_init_customized_info(pf);
1732
1733         /* Initialize the filter invalidation configuration */
1734         i40e_init_filter_invalidation(pf);
1735
1736         ret = i40e_init_ethtype_filter_list(dev);
1737         if (ret < 0)
1738                 goto err_init_ethtype_filter_list;
1739         ret = i40e_init_tunnel_filter_list(dev);
1740         if (ret < 0)
1741                 goto err_init_tunnel_filter_list;
1742         ret = i40e_init_fdir_filter_list(dev);
1743         if (ret < 0)
1744                 goto err_init_fdir_filter_list;
1745
1746         /* initialize queue region configuration */
1747         i40e_init_queue_region_conf(dev);
1748
1749         /* reset all stats of the device, including pf and main vsi */
1750         i40e_dev_stats_reset(dev);
1751
1752         return 0;
1753
1754 err_init_fdir_filter_list:
1755         rte_free(pf->tunnel.hash_table);
1756         rte_free(pf->tunnel.hash_map);
1757 err_init_tunnel_filter_list:
1758         rte_free(pf->ethertype.hash_table);
1759         rte_free(pf->ethertype.hash_map);
1760 err_init_ethtype_filter_list:
1761         rte_free(dev->data->mac_addrs);
1762         dev->data->mac_addrs = NULL;
1763 err_mac_alloc:
1764         i40e_vsi_release(pf->main_vsi);
1765 err_setup_pf_switch:
1766 err_get_mac_addr:
1767 err_configure_lan_hmc:
1768         (void)i40e_shutdown_lan_hmc(hw);
1769 err_init_lan_hmc:
1770         i40e_res_pool_destroy(&pf->msix_pool);
1771 err_msix_pool_init:
1772         i40e_res_pool_destroy(&pf->qp_pool);
1773 err_qp_pool_init:
1774 err_parameter_init:
1775 err_get_capabilities:
1776         (void)i40e_shutdown_adminq(hw);
1777
1778         return ret;
1779 }
1780
1781 static void
1782 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1783 {
1784         struct i40e_ethertype_filter *p_ethertype;
1785         struct i40e_ethertype_rule *ethertype_rule;
1786
1787         ethertype_rule = &pf->ethertype;
1788         /* Remove all ethertype filter rules and hash */
1789         if (ethertype_rule->hash_map)
1790                 rte_free(ethertype_rule->hash_map);
1791         if (ethertype_rule->hash_table)
1792                 rte_hash_free(ethertype_rule->hash_table);
1793
1794         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1795                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1796                              p_ethertype, rules);
1797                 rte_free(p_ethertype);
1798         }
1799 }
1800
1801 static void
1802 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1803 {
1804         struct i40e_tunnel_filter *p_tunnel;
1805         struct i40e_tunnel_rule *tunnel_rule;
1806
1807         tunnel_rule = &pf->tunnel;
1808         /* Remove all tunnel director rules and hash */
1809         if (tunnel_rule->hash_map)
1810                 rte_free(tunnel_rule->hash_map);
1811         if (tunnel_rule->hash_table)
1812                 rte_hash_free(tunnel_rule->hash_table);
1813
1814         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1815                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1816                 rte_free(p_tunnel);
1817         }
1818 }
1819
1820 static void
1821 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1822 {
1823         struct i40e_fdir_filter *p_fdir;
1824         struct i40e_fdir_info *fdir_info;
1825
1826         fdir_info = &pf->fdir;
1827
1828         /* Remove all flow director rules */
1829         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
1830                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1831 }
1832
1833 static void
1834 i40e_fdir_memory_cleanup(struct i40e_pf *pf)
1835 {
1836         struct i40e_fdir_info *fdir_info;
1837
1838         fdir_info = &pf->fdir;
1839
1840         /* flow director memory cleanup */
1841         if (fdir_info->hash_map)
1842                 rte_free(fdir_info->hash_map);
1843         if (fdir_info->hash_table)
1844                 rte_hash_free(fdir_info->hash_table);
1845         if (fdir_info->fdir_flow_pool.bitmap)
1846                 rte_free(fdir_info->fdir_flow_pool.bitmap);
1847         if (fdir_info->fdir_flow_pool.pool)
1848                 rte_free(fdir_info->fdir_flow_pool.pool);
1849         if (fdir_info->fdir_filter_array)
1850                 rte_free(fdir_info->fdir_filter_array);
1851 }
1852
1853 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1854 {
1855         /*
1856          * Disable flexible payload by default for the
1857          * corresponding L2/L3/L4 layers.
1858          */
1859         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1860         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1861         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1862 }
1863
1864 static int
1865 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1866 {
1867         struct i40e_hw *hw;
1868
1869         PMD_INIT_FUNC_TRACE();
1870
1871         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1872                 return 0;
1873
1874         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1875
1876         if (hw->adapter_closed == 0)
1877                 i40e_dev_close(dev);
1878
1879         return 0;
1880 }
1881
1882 static int
1883 i40e_dev_configure(struct rte_eth_dev *dev)
1884 {
1885         struct i40e_adapter *ad =
1886                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1887         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1888         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1889         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1890         int i, ret;
1891
1892         ret = i40e_dev_sync_phy_type(hw);
1893         if (ret)
1894                 return ret;
1895
1896         /* Initialize to TRUE. If any Rx queue doesn't meet the bulk
1897          * allocation or vector Rx preconditions, we will reset it.
1898          */
1899         ad->rx_bulk_alloc_allowed = true;
1900         ad->rx_vec_allowed = true;
1901         ad->tx_simple_allowed = true;
1902         ad->tx_vec_allowed = true;
1903
1904         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1905                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1906
1907         /* Only legacy filter API needs the following fdir config. So when the
1908          * legacy filter API is deprecated, the following codes should also be
1909          * legacy filter API is deprecated, the following code should also be
1910          */
1911         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1912                 ret = i40e_fdir_setup(pf);
1913                 if (ret != I40E_SUCCESS) {
1914                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1915                         return -ENOTSUP;
1916                 }
1917                 ret = i40e_fdir_configure(dev);
1918                 if (ret < 0) {
1919                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1920                         goto err;
1921                 }
1922         } else
1923                 i40e_fdir_teardown(pf);
1924
1925         ret = i40e_dev_init_vlan(dev);
1926         if (ret < 0)
1927                 goto err;
1928
1929         /* VMDQ setup.
1930          *  The general PMD call sequence is NIC init, configure,
1931          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up
1932          *  the VSI that a specific queue belongs to when VMDQ is
1933          *  applicable, so VMDQ has to be configured before
1934          *  rx/tx_queue_setup(); this function is the right place for
1935          *  vmdq_setup. RSS setup needs the actual number of configured RX
1936          *  queues, which is only available after rx_queue_setup(), so
1937          *  dev_start() is the right place for RSS setup.
1938          */
1939         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1940                 ret = i40e_vmdq_setup(dev);
1941                 if (ret)
1942                         goto err;
1943         }
1944
1945         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1946                 ret = i40e_dcb_setup(dev);
1947                 if (ret) {
1948                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1949                         goto err_dcb;
1950                 }
1951         }
1952
1953         TAILQ_INIT(&pf->flow_list);
1954
1955         return 0;
1956
1957 err_dcb:
1958         /* need to release vmdq resource if exists */
1959         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1960                 i40e_vsi_release(pf->vmdq[i].vsi);
1961                 pf->vmdq[i].vsi = NULL;
1962         }
1963         rte_free(pf->vmdq);
1964         pf->vmdq = NULL;
1965 err:
1966         /* Need to release fdir resource if exists.
1967          * Only legacy filter API needs the following fdir config. So when the
1968          * legacy filter API is deprecated, the following code should also be
1969          * removed.
1970          */
1971         i40e_fdir_teardown(pf);
1972         return ret;
1973 }
1974
1975 void
1976 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1977 {
1978         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1979         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1980         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1981         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1982         uint16_t msix_vect = vsi->msix_intr;
1983         uint16_t i;
1984
1985         for (i = 0; i < vsi->nb_qps; i++) {
1986                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1987                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1988                 rte_wmb();
1989         }
1990
1991         if (vsi->type != I40E_VSI_SRIOV) {
1992                 if (!rte_intr_allow_others(intr_handle)) {
1993                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1994                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1995                         I40E_WRITE_REG(hw,
1996                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1997                                        0);
1998                 } else {
1999                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2000                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
2001                         I40E_WRITE_REG(hw,
2002                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2003                                                        msix_vect - 1), 0);
2004                 }
2005         } else {
2006                 uint32_t reg;
2007                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2008                         vsi->user_param + (msix_vect - 1);
2009
2010                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2011                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
2012         }
2013         I40E_WRITE_FLUSH(hw);
2014 }
2015
2016 static void
2017 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
2018                        int base_queue, int nb_queue,
2019                        uint16_t itr_idx)
2020 {
2021         int i;
2022         uint32_t val;
2023         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2024         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2025
2026         /* Bind all RX queues to allocated MSIX interrupt */
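        /* The hardware chains the causes of one vector as a linked
         * list: each QINT_RQCTL entry points at the following queue via
         * NEXTQ_INDX, and the last entry terminates the list with the
         * all-ones NEXTQ_INDX value.
         */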
2027         for (i = 0; i < nb_queue; i++) {
2028                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2029                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
2030                         ((base_queue + i + 1) <<
2031                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2032                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2033                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2034
2035                 if (i == nb_queue - 1)
2036                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
2037                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
2038         }
2039
2040         /* Write first RX queue to Link list register as the head element */
2041         if (vsi->type != I40E_VSI_SRIOV) {
2042                 uint16_t interval =
2043                         i40e_calc_itr_interval(1, pf->support_multi_driver);
2044
2045                 if (msix_vect == I40E_MISC_VEC_ID) {
2046                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2047                                        (base_queue <<
2048                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2049                                        (0x0 <<
2050                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2051                         I40E_WRITE_REG(hw,
2052                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2053                                        interval);
2054                 } else {
2055                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2056                                        (base_queue <<
2057                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2058                                        (0x0 <<
2059                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2060                         I40E_WRITE_REG(hw,
2061                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2062                                                        msix_vect - 1),
2063                                        interval);
2064                 }
2065         } else {
2066                 uint32_t reg;
2067
2068                 if (msix_vect == I40E_MISC_VEC_ID) {
2069                         I40E_WRITE_REG(hw,
2070                                        I40E_VPINT_LNKLST0(vsi->user_param),
2071                                        (base_queue <<
2072                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2073                                        (0x0 <<
2074                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2075                 } else {
2076                         /* num_msix_vectors_vf includes IRQ0, so subtract it */
2077                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2078                                 vsi->user_param + (msix_vect - 1);
2079
2080                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2081                                        (base_queue <<
2082                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2083                                        (0x0 <<
2084                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2085                 }
2086         }
2087
2088         I40E_WRITE_FLUSH(hw);
2089 }
2090
2091 int
2092 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
2093 {
2094         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2095         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2096         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2097         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2098         uint16_t msix_vect = vsi->msix_intr;
2099         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2100         uint16_t queue_idx = 0;
2101         int record = 0;
2102         int i;
2103
2104         for (i = 0; i < vsi->nb_qps; i++) {
2105                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2106                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2107         }
2108
2109         /* VF bind interrupt */
2110         if (vsi->type == I40E_VSI_SRIOV) {
2111                 if (vsi->nb_msix == 0) {
2112                         PMD_DRV_LOG(ERR, "No msix resource");
2113                         return -EINVAL;
2114                 }
2115                 __vsi_queues_bind_intr(vsi, msix_vect,
2116                                        vsi->base_queue, vsi->nb_qps,
2117                                        itr_idx);
2118                 return 0;
2119         }
2120
2121         /* PF & VMDq bind interrupt */
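        /* 'record' selects whether the queue-to-vector mapping is also
         * stored in intr_handle->intr_vec for the datapath; VMDq queues
         * index that table right after the main VSI's queues.
         */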
2122         if (rte_intr_dp_is_en(intr_handle)) {
2123                 if (vsi->type == I40E_VSI_MAIN) {
2124                         queue_idx = 0;
2125                         record = 1;
2126                 } else if (vsi->type == I40E_VSI_VMDQ2) {
2127                         struct i40e_vsi *main_vsi =
2128                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2129                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
2130                         record = 1;
2131                 }
2132         }
2133
2134         for (i = 0; i < vsi->nb_used_qps; i++) {
2135                 if (vsi->nb_msix == 0) {
2136                         PMD_DRV_LOG(ERR, "No msix resource");
2137                         return -EINVAL;
2138                 } else if (nb_msix <= 1) {
2139                         if (!rte_intr_allow_others(intr_handle))
2140                                 /* allow sharing MISC_VEC_ID */
2141                                 msix_vect = I40E_MISC_VEC_ID;
2142
2143                         /* not enough msix_vect, map all queues to one */
2144                         __vsi_queues_bind_intr(vsi, msix_vect,
2145                                                vsi->base_queue + i,
2146                                                vsi->nb_used_qps - i,
2147                                                itr_idx);
2148                         for (; !!record && i < vsi->nb_used_qps; i++)
2149                                 intr_handle->intr_vec[queue_idx + i] =
2150                                         msix_vect;
2151                         break;
2152                 }
2153                 /* 1:1 queue/msix_vect mapping */
2154                 __vsi_queues_bind_intr(vsi, msix_vect,
2155                                        vsi->base_queue + i, 1,
2156                                        itr_idx);
2157                 if (!!record)
2158                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
2159
2160                 msix_vect++;
2161                 nb_msix--;
2162         }
2163
2164         return 0;
2165 }
2166
2167 void
2168 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2169 {
2170         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2171         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2172         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2173         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2174         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2175         uint16_t msix_intr, i;
2176
2177         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2178                 for (i = 0; i < vsi->nb_msix; i++) {
2179                         msix_intr = vsi->msix_intr + i;
2180                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2181                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
2182                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2183                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2184                 }
2185         else
2186                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2187                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
2188                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2189                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2190
2191         I40E_WRITE_FLUSH(hw);
2192 }
2193
2194 void
2195 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2196 {
2197         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2198         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2199         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2200         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2201         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2202         uint16_t msix_intr, i;
2203
2204         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2205                 for (i = 0; i < vsi->nb_msix; i++) {
2206                         msix_intr = vsi->msix_intr + i;
2207                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2208                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2209                 }
2210         else
2211                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2212                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2213
2214         I40E_WRITE_FLUSH(hw);
2215 }
2216
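/*
 * Translate the ethdev ETH_LINK_SPEED_* bitmap into the AdminQ
 * I40E_LINK_SPEED_* bitmap; requested speeds with no i40e equivalent
 * are simply left out of the result.
 */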
2217 static inline uint8_t
2218 i40e_parse_link_speeds(uint16_t link_speeds)
2219 {
2220         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2221
2222         if (link_speeds & ETH_LINK_SPEED_40G)
2223                 link_speed |= I40E_LINK_SPEED_40GB;
2224         if (link_speeds & ETH_LINK_SPEED_25G)
2225                 link_speed |= I40E_LINK_SPEED_25GB;
2226         if (link_speeds & ETH_LINK_SPEED_20G)
2227                 link_speed |= I40E_LINK_SPEED_20GB;
2228         if (link_speeds & ETH_LINK_SPEED_10G)
2229                 link_speed |= I40E_LINK_SPEED_10GB;
2230         if (link_speeds & ETH_LINK_SPEED_1G)
2231                 link_speed |= I40E_LINK_SPEED_1GB;
2232         if (link_speeds & ETH_LINK_SPEED_100M)
2233                 link_speed |= I40E_LINK_SPEED_100MB;
2234
2235         return link_speed;
2236 }
2237
2238 static int
2239 i40e_phy_conf_link(struct i40e_hw *hw,
2240                    uint8_t abilities,
2241                    uint8_t force_speed,
2242                    bool is_up)
2243 {
2244         enum i40e_status_code status;
2245         struct i40e_aq_get_phy_abilities_resp phy_ab;
2246         struct i40e_aq_set_phy_config phy_conf;
2247         enum i40e_aq_phy_type cnt;
2248         uint8_t avail_speed;
2249         uint32_t phy_type_mask = 0;
2250
2251         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2252                         I40E_AQ_PHY_FLAG_PAUSE_RX |
2253                         I40E_AQ_PHY_FLAG_LOW_POWER;
2255         int ret = -ENOTSUP;
2256
2257         /* To get phy capabilities of available speeds. */
2258         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2259                                               NULL);
2260         if (status) {
2261                 PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d",
2262                                 status);
2263                 return ret;
2264         }
2265         avail_speed = phy_ab.link_speed;
2266
2267         /* To get the current phy config. */
2268         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2269                                               NULL);
2270         if (status) {
2271                 PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d",
2272                                 status);
2273                 return ret;
2274         }
2275
2276         /* If the link needs to go up, autoneg is on and the speed is
2277          * already OK, there is no need to set it up again.
2278          */
2279         if (is_up && phy_ab.phy_type != 0 &&
2280                      abilities & I40E_AQ_PHY_AN_ENABLED &&
2281                      phy_ab.link_speed != 0)
2282                 return I40E_SUCCESS;
2283
2284         memset(&phy_conf, 0, sizeof(phy_conf));
2285
2286         /* bits 0-2 use the values from get_phy_abilities_resp */
2287         abilities &= ~mask;
2288         abilities |= phy_ab.abilities & mask;
2289
2290         phy_conf.abilities = abilities;
2291
2292         /* If the link needs to go up but the forced speed is not supported,
2293          * warn the user and configure the default available speeds.
2294          */
2295         if (is_up && !(force_speed & avail_speed)) {
2296                 PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!");
2297                 phy_conf.link_speed = avail_speed;
2298         } else {
2299                 phy_conf.link_speed = is_up ? force_speed : avail_speed;
2300         }
2301
2302         /* PHY type mask needs to include each type except PHY type extension */
2303         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2304                 phy_type_mask |= 1 << cnt;
2305
2306         /* use get_phy_abilities_resp value for the rest */
2307         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2308         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2309                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2310                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
2311         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2312         phy_conf.eee_capability = phy_ab.eee_capability;
2313         phy_conf.eeer = phy_ab.eeer_val;
2314         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2315
2316         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2317                     phy_ab.abilities, phy_ab.link_speed);
2318         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2319                     phy_conf.abilities, phy_conf.link_speed);
2320
2321         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2322         if (status)
2323                 return ret;
2324
2325         return I40E_SUCCESS;
2326 }
2327
2328 static int
2329 i40e_apply_link_speed(struct rte_eth_dev *dev)
2330 {
2331         uint8_t speed;
2332         uint8_t abilities = 0;
2333         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2334         struct rte_eth_conf *conf = &dev->data->dev_conf;
2335
2336         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2337                      I40E_AQ_PHY_LINK_ENABLED;
2338
2339         if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
2340                 conf->link_speeds = ETH_LINK_SPEED_40G |
2341                                     ETH_LINK_SPEED_25G |
2342                                     ETH_LINK_SPEED_20G |
2343                                     ETH_LINK_SPEED_10G |
2344                                     ETH_LINK_SPEED_1G |
2345                                     ETH_LINK_SPEED_100M;
2346
2347                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2348         } else {
2349                 abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2350         }
2351         speed = i40e_parse_link_speeds(conf->link_speeds);
2352
2353         return i40e_phy_conf_link(hw, abilities, speed, true);
2354 }
2355
2356 static int
2357 i40e_dev_start(struct rte_eth_dev *dev)
2358 {
2359         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2360         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2361         struct i40e_vsi *main_vsi = pf->main_vsi;
2362         int ret, i;
2363         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2364         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2365         uint32_t intr_vector = 0;
2366         struct i40e_vsi *vsi;
2367         uint16_t nb_rxq, nb_txq;
2368
2369         hw->adapter_stopped = 0;
2370
2371         rte_intr_disable(intr_handle);
2372
2373         if ((rte_intr_cap_multiple(intr_handle) ||
2374              !RTE_ETH_DEV_SRIOV(dev).active) &&
2375             dev->data->dev_conf.intr_conf.rxq != 0) {
2376                 intr_vector = dev->data->nb_rx_queues;
2377                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2378                 if (ret)
2379                         return ret;
2380         }
2381
2382         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2383                 intr_handle->intr_vec =
2384                         rte_zmalloc("intr_vec",
2385                                     dev->data->nb_rx_queues * sizeof(int),
2386                                     0);
2387                 if (!intr_handle->intr_vec) {
2388                         PMD_INIT_LOG(ERR,
2389                                 "Failed to allocate intr_vec for %d Rx queues",
2390                                 dev->data->nb_rx_queues);
2391                         return -ENOMEM;
2392                 }
2393         }
2394
2395         /* Initialize VSI */
2396         ret = i40e_dev_rxtx_init(pf);
2397         if (ret != I40E_SUCCESS) {
2398                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2399                 return ret;
2400         }
2401
2402         /* Map queues with MSIX interrupt */
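        /* The main VSI uses whatever Rx queues remain after each
         * configured VMDq VSI has taken its fixed per-pool share.
         */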
2403         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2404                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2405         ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2406         if (ret < 0)
2407                 return ret;
2408         i40e_vsi_enable_queues_intr(main_vsi);
2409
2410         /* Map VMDQ VSI queues with MSIX interrupt */
2411         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2412                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2413                 ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2414                                                 I40E_ITR_INDEX_DEFAULT);
2415                 if (ret < 0)
2416                         return ret;
2417                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2418         }
2419
2420         /* Enable all queues which have been configured */
2421         for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
2422                 ret = i40e_dev_rx_queue_start(dev, nb_rxq);
2423                 if (ret)
2424                         goto rx_err;
2425         }
2426
2427         for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
2428                 ret = i40e_dev_tx_queue_start(dev, nb_txq);
2429                 if (ret)
2430                         goto tx_err;
2431         }
2432
2433         /* Enable receiving broadcast packets */
2434         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2435         if (ret != I40E_SUCCESS)
2436                 PMD_DRV_LOG(INFO, "failed to set VSI broadcast");
2437
2438         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2439                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2440                                                 true, NULL);
2441                 if (ret != I40E_SUCCESS)
2442                         PMD_DRV_LOG(INFO, "failed to set VSI broadcast");
2443         }
2444
2445         /* Enable the VLAN promiscuous mode. */
2446         if (pf->vfs) {
2447                 for (i = 0; i < pf->vf_num; i++) {
2448                         vsi = pf->vfs[i].vsi;
2449                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2450                                                      true, NULL);
2451                 }
2452         }
2453
2454         /* Set MAC loopback mode (none or local PHY) */
2455         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2456             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2457                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2458                 if (ret != I40E_SUCCESS) {
2459                         PMD_DRV_LOG(ERR, "fail to set loopback link");
2460                         goto tx_err;
2461                 }
2462         }
2463
2464         /* Apply link configure */
2465         ret = i40e_apply_link_speed(dev);
2466         if (I40E_SUCCESS != ret) {
2467                 PMD_DRV_LOG(ERR, "Failed to apply link settings");
2468                 goto tx_err;
2469         }
2470
2471         if (!rte_intr_allow_others(intr_handle)) {
2472                 rte_intr_callback_unregister(intr_handle,
2473                                              i40e_dev_interrupt_handler,
2474                                              (void *)dev);
2475                 /* configure and enable device interrupt */
2476                 i40e_pf_config_irq0(hw, FALSE);
2477                 i40e_pf_enable_irq0(hw);
2478
2479                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2480                         PMD_INIT_LOG(INFO,
2481                                 "LSC not enabled because interrupt multiplexing is unavailable");
2482         } else {
2483                 ret = i40e_aq_set_phy_int_mask(hw,
2484                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2485                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2486                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2487                 if (ret != I40E_SUCCESS)
2488                         PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2489
2490                 /* Call the get_link_info AQ command to enable/disable LSE */
2491                 i40e_dev_link_update(dev, 0);
2492         }
2493
2494         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2495                 rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2496                                   i40e_dev_alarm_handler, dev);
2497         } else {
2498                 /* enable uio intr after callback register */
2499                 rte_intr_enable(intr_handle);
2500         }
2501
2502         i40e_filter_restore(pf);
2503
2504         if (pf->tm_conf.root && !pf->tm_conf.committed)
2505                 PMD_DRV_LOG(WARNING,
2506                             "please call hierarchy_commit() "
2507                             "before starting the port");
2508
2509         return I40E_SUCCESS;
2510
2511 tx_err:
2512         for (i = 0; i < nb_txq; i++)
2513                 i40e_dev_tx_queue_stop(dev, i);
2514 rx_err:
2515         for (i = 0; i < nb_rxq; i++)
2516                 i40e_dev_rx_queue_stop(dev, i);
2517
2518         return ret;
2519 }
2520
2521 static int
2522 i40e_dev_stop(struct rte_eth_dev *dev)
2523 {
2524         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2525         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2526         struct i40e_vsi *main_vsi = pf->main_vsi;
2527         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2528         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2529         int i;
2530
2531         if (hw->adapter_stopped == 1)
2532                 return 0;
2533
2534         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2535                 rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2536                 rte_intr_enable(intr_handle);
2537         }
2538
2539         /* Disable all queues */
2540         for (i = 0; i < dev->data->nb_tx_queues; i++)
2541                 i40e_dev_tx_queue_stop(dev, i);
2542
2543         for (i = 0; i < dev->data->nb_rx_queues; i++)
2544                 i40e_dev_rx_queue_stop(dev, i);
2545
2546         /* un-map queues with interrupt registers */
2547         i40e_vsi_disable_queues_intr(main_vsi);
2548         i40e_vsi_queues_unbind_intr(main_vsi);
2549
2550         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2551                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2552                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2553         }
2554
2555         /* Clear all queues and release memory */
2556         i40e_dev_clear_queues(dev);
2557
2558         /* Set link down */
2559         i40e_dev_set_link_down(dev);
2560
2561         if (!rte_intr_allow_others(intr_handle))
2562                 /* restore the default interrupt handler */
2563                 rte_intr_callback_register(intr_handle,
2564                                            i40e_dev_interrupt_handler,
2565                                            (void *)dev);
2566
2567         /* Clean datapath event and queue/vec mapping */
2568         rte_intr_efd_disable(intr_handle);
2569         if (intr_handle->intr_vec) {
2570                 rte_free(intr_handle->intr_vec);
2571                 intr_handle->intr_vec = NULL;
2572         }
2573
2574         /* reset hierarchy commit */
2575         pf->tm_conf.committed = false;
2576
2577         hw->adapter_stopped = 1;
2578         dev->data->dev_started = 0;
2579
2580         pf->adapter->rss_reta_updated = 0;
2581
2582         return 0;
2583 }
2584
2585 static int
2586 i40e_dev_close(struct rte_eth_dev *dev)
2587 {
2588         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2589         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2590         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2591         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2592         struct i40e_mirror_rule *p_mirror;
2593         struct i40e_filter_control_settings settings;
2594         struct rte_flow *p_flow;
2595         uint32_t reg;
2596         int i;
2597         int ret;
2598         uint8_t aq_fail = 0;
2599         int retries = 0;
2600
2601         PMD_INIT_FUNC_TRACE();
2602         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2603                 return 0;
2604
2605         ret = rte_eth_switch_domain_free(pf->switch_domain_id);
2606         if (ret)
2607                 PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
2608
2609
2610         ret = i40e_dev_stop(dev);
2611
2612         /* Remove all mirror rules */
2613         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2614                 ret = i40e_aq_del_mirror_rule(hw,
2615                                               pf->main_vsi->veb->seid,
2616                                               p_mirror->rule_type,
2617                                               p_mirror->entries,
2618                                               p_mirror->num_entries,
2619                                               p_mirror->id);
2620                 if (ret < 0)
2621                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2622                                     "status = %d, aq_err = %d.", ret,
2623                                     hw->aq.asq_last_status);
2624
2625                 /* remove mirror software resource anyway */
2626                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2627                 rte_free(p_mirror);
2628                 pf->nb_mirror_rule--;
2629         }
2630
2631         i40e_dev_free_queues(dev);
2632
2633         /* Disable interrupt */
2634         i40e_pf_disable_irq0(hw);
2635         rte_intr_disable(intr_handle);
2636
2637         /*
2638          * Only legacy filter API needs the following fdir config. So when the
2639          * legacy filter API is deprecated, the following code should also be
2640          * removed.
2641          */
2642         i40e_fdir_teardown(pf);
2643
2644         /* shutdown and destroy the HMC */
2645         i40e_shutdown_lan_hmc(hw);
2646
2647         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2648                 i40e_vsi_release(pf->vmdq[i].vsi);
2649                 pf->vmdq[i].vsi = NULL;
2650         }
2651         rte_free(pf->vmdq);
2652         pf->vmdq = NULL;
2653
2654         /* release all the existing VSIs and VEBs */
2655         i40e_vsi_release(pf->main_vsi);
2656
2657         /* shutdown the adminq */
2658         i40e_aq_queue_shutdown(hw, true);
2659         i40e_shutdown_adminq(hw);
2660
2661         i40e_res_pool_destroy(&pf->qp_pool);
2662         i40e_res_pool_destroy(&pf->msix_pool);
2663
2664         /* Disable flexible payload in global configuration */
2665         if (!pf->support_multi_driver)
2666                 i40e_flex_payload_reg_set_default(hw);
2667
2668         /* force a PF reset to clean anything leftover */
2669         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2670         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2671                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2672         I40E_WRITE_FLUSH(hw);
2673
2674         /* Clear PXE mode */
2675         i40e_clear_pxe_mode(hw);
2676
2677         /* Unconfigure filter control */
2678         memset(&settings, 0, sizeof(settings));
2679         ret = i40e_set_filter_control(hw, &settings);
2680         if (ret)
2681                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2682                                         ret);
2683
2684         /* Disable flow control */
2685         hw->fc.requested_mode = I40E_FC_NONE;
2686         i40e_set_fc(hw, &aq_fail, TRUE);
2687
2688         /* uninitialize pf host driver */
2689         i40e_pf_host_uninit(dev);
2690
2691         do {
2692                 ret = rte_intr_callback_unregister(intr_handle,
2693                                 i40e_dev_interrupt_handler, dev);
2694                 if (ret >= 0 || ret == -ENOENT) {
2695                         break;
2696                 } else if (ret != -EAGAIN) {
2697                         PMD_INIT_LOG(ERR,
2698                                  "intr callback unregister failed: %d",
2699                                  ret);
2700                 }
2701                 i40e_msec_delay(500);
2702         } while (retries++ < 5);
2703
2704         i40e_rm_ethtype_filter_list(pf);
2705         i40e_rm_tunnel_filter_list(pf);
2706         i40e_rm_fdir_filter_list(pf);
2707
2708         /* Remove all flows */
2709         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
2710                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
2711                 /* Do not free FDIR flows since they are statically allocated */
2712                 if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
2713                         rte_free(p_flow);
2714         }
2715
2716         /* release the statically allocated FDIR memory */
2717         i40e_fdir_memory_cleanup(pf);
2718
2719         /* Remove all Traffic Manager configuration */
2720         i40e_tm_conf_uninit(dev);
2721
2722         i40e_clear_automask(pf);
2723
2724         hw->adapter_closed = 1;
2725         return ret;
2726 }
2727
2728 /*
2729  * Reset the PF device only to re-initialize resources in the PMD layer
2730  */
2731 static int
2732 i40e_dev_reset(struct rte_eth_dev *dev)
2733 {
2734         int ret;
2735
2736         /* When a DPDK PMD PF begins to reset the PF port, it should notify
2737          * all of its VFs so that they stay aligned with it. The detailed
2738          * notification mechanism is PMD specific; for the i40e PF it is
2739          * rather complex. To avoid unexpected behavior in the VFs, reset of
2740          * a PF with SR-IOV active is currently not supported; it may be added later.
2741          */
2742         if (dev->data->sriov.active)
2743                 return -ENOTSUP;
2744
2745         ret = eth_i40e_dev_uninit(dev);
2746         if (ret)
2747                 return ret;
2748
2749         ret = eth_i40e_dev_init(dev, NULL);
2750
2751         return ret;
2752 }
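
/*
 * Illustrative sketch (not part of the driver): applications reach this
 * reset path through rte_eth_dev_reset(), typically from an
 * RTE_ETH_EVENT_INTR_RESET callback. A minimal handler might look like:
 */
static int
example_reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
                       void *cb_arg __rte_unused, void *ret_param __rte_unused)
{
        if (event != RTE_ETH_EVENT_INTR_RESET)
                return 0;

        /* uninit + re-init the port; fails with -ENOTSUP while SR-IOV is active */
        if (rte_eth_dev_reset(port_id) != 0)
                return -1;

        /* the application must then reconfigure and restart the port itself */
        return 0;
}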
2753
2754 static int
2755 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2756 {
2757         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2758         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2759         struct i40e_vsi *vsi = pf->main_vsi;
2760         int status;
2761
2762         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2763                                                      true, NULL, true);
2764         if (status != I40E_SUCCESS) {
2765                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2766                 return -EAGAIN;
2767         }
2768
2769         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2770                                                         TRUE, NULL);
2771         if (status != I40E_SUCCESS) {
2772                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2773                 /* Rollback unicast promiscuous mode */
2774                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2775                                                     false, NULL, true);
2776                 return -EAGAIN;
2777         }
2778
2779         return 0;
2780 }
2781
2782 static int
2783 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2784 {
2785         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2786         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2787         struct i40e_vsi *vsi = pf->main_vsi;
2788         int status;
2789
2790         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2791                                                      false, NULL, true);
2792         if (status != I40E_SUCCESS) {
2793                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2794                 return -EAGAIN;
2795         }
2796
2797         /* must remain in all_multicast mode */
2798         if (dev->data->all_multicast == 1)
2799                 return 0;
2800
2801         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2802                                                         false, NULL);
2803         if (status != I40E_SUCCESS) {
2804                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2805                 /* Rollback unicast promiscuous mode */
2806                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2807                                                     true, NULL, true);
2808                 return -EAGAIN;
2809         }
2810
2811         return 0;
2812 }
2813
2814 static int
2815 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2816 {
2817         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2818         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2819         struct i40e_vsi *vsi = pf->main_vsi;
2820         int ret;
2821
2822         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2823         if (ret != I40E_SUCCESS) {
2824                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2825                 return -EAGAIN;
2826         }
2827
2828         return 0;
2829 }
2830
2831 static int
2832 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2833 {
2834         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2835         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2836         struct i40e_vsi *vsi = pf->main_vsi;
2837         int ret;
2838
2839         if (dev->data->promiscuous == 1)
2840                 return 0; /* promiscuous still on; keep multicast promiscuous */
2841
2842         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2843                                 vsi->seid, FALSE, NULL);
2844         if (ret != I40E_SUCCESS) {
2845                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2846                 return -EAGAIN;
2847         }
2848
2849         return 0;
2850 }
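
/*
 * Illustrative sketch (not part of the driver): the four handlers above are
 * coupled so that leaving promiscuous mode never silently drops
 * all-multicast reception, and vice versa. Seen through the ethdev API:
 */
static void
example_promisc_vs_allmulti(uint16_t port_id)
{
        /* either mode turns multicast promiscuous on in the VSI */
        rte_eth_promiscuous_enable(port_id);
        rte_eth_allmulticast_enable(port_id);

        /* unicast promiscuous goes off, but multicast promiscuous stays on
         * because all_multicast is still active (see the check above)
         */
        rte_eth_promiscuous_disable(port_id);

        /* only now is multicast promiscuous actually disabled */
        rte_eth_allmulticast_disable(port_id);
}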
2851
2852 /*
2853  * Set device link up.
2854  */
2855 static int
2856 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2857 {
2858         /* re-apply link speed setting */
2859         return i40e_apply_link_speed(dev);
2860 }
2861
2862 /*
2863  * Set device link down.
2864  */
2865 static int
2866 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2867 {
2868         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2869         uint8_t abilities = 0;
2870         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2871
2872         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2873         return i40e_phy_conf_link(hw, abilities, speed, false);
2874 }
2875
2876 static __rte_always_inline void
2877 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2878 {
2879 /* Link status registers and values */
2880 #define I40E_PRTMAC_LINKSTA             0x001E2420
2881 #define I40E_REG_LINK_UP                0x40000080
2882 #define I40E_PRTMAC_MACC                0x001E24E0
2883 #define I40E_REG_MACC_25GB              0x00020000
2884 #define I40E_REG_SPEED_MASK             0x38000000
2885 #define I40E_REG_SPEED_0                0x00000000
2886 #define I40E_REG_SPEED_1                0x08000000
2887 #define I40E_REG_SPEED_2                0x10000000
2888 #define I40E_REG_SPEED_3                0x18000000
2889 #define I40E_REG_SPEED_4                0x20000000
2890         uint32_t link_speed;
2891         uint32_t reg_val;
2892
2893         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2894         link_speed = reg_val & I40E_REG_SPEED_MASK;
2895         reg_val &= I40E_REG_LINK_UP;
2896         link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2897
2898         if (unlikely(link->link_status == 0))
2899                 return;
2900
2901         /* Parse the link status */
2902         switch (link_speed) {
2903         case I40E_REG_SPEED_0:
2904                 link->link_speed = ETH_SPEED_NUM_100M;
2905                 break;
2906         case I40E_REG_SPEED_1:
2907                 link->link_speed = ETH_SPEED_NUM_1G;
2908                 break;
2909         case I40E_REG_SPEED_2:
2910                 if (hw->mac.type == I40E_MAC_X722)
2911                         link->link_speed = ETH_SPEED_NUM_2_5G;
2912                 else
2913                         link->link_speed = ETH_SPEED_NUM_10G;
2914                 break;
2915         case I40E_REG_SPEED_3:
2916                 if (hw->mac.type == I40E_MAC_X722) {
2917                         link->link_speed = ETH_SPEED_NUM_5G;
2918                 } else {
2919                         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2920
2921                         if (reg_val & I40E_REG_MACC_25GB)
2922                                 link->link_speed = ETH_SPEED_NUM_25G;
2923                         else
2924                                 link->link_speed = ETH_SPEED_NUM_40G;
2925                 }
2926                 break;
2927         case I40E_REG_SPEED_4:
2928                 if (hw->mac.type == I40E_MAC_X722)
2929                         link->link_speed = ETH_SPEED_NUM_10G;
2930                 else
2931                         link->link_speed = ETH_SPEED_NUM_20G;
2932                 break;
2933         default:
2934                 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2935                 break;
2936         }
2937 }
2938
2939 static __rte_always_inline void
2940 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2941         bool enable_lse, int wait_to_complete)
2942 {
2943 #define CHECK_INTERVAL             100  /* 100ms */
2944 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2945         uint32_t rep_cnt = MAX_REPEAT_TIME;
2946         struct i40e_link_status link_status;
2947         int status;
2948
2949         memset(&link_status, 0, sizeof(link_status));
2950
2951         do {
2952                 memset(&link_status, 0, sizeof(link_status));
2953
2954                 /* Get link status information from hardware */
2955                 status = i40e_aq_get_link_info(hw, enable_lse,
2956                                                 &link_status, NULL);
2957                 if (unlikely(status != I40E_SUCCESS)) {
2958                         link->link_speed = ETH_SPEED_NUM_NONE;
2959                         link->link_duplex = ETH_LINK_FULL_DUPLEX;
2960                         PMD_DRV_LOG(ERR, "Failed to get link info");
2961                         return;
2962                 }
2963
2964                 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2965                 if (!wait_to_complete || link->link_status)
2966                         break;
2967
2968                 rte_delay_ms(CHECK_INTERVAL);
2969         } while (--rep_cnt);
2970
2971         /* Parse the link status */
2972         switch (link_status.link_speed) {
2973         case I40E_LINK_SPEED_100MB:
2974                 link->link_speed = ETH_SPEED_NUM_100M;
2975                 break;
2976         case I40E_LINK_SPEED_1GB:
2977                 link->link_speed = ETH_SPEED_NUM_1G;
2978                 break;
2979         case I40E_LINK_SPEED_10GB:
2980                 link->link_speed = ETH_SPEED_NUM_10G;
2981                 break;
2982         case I40E_LINK_SPEED_20GB:
2983                 link->link_speed = ETH_SPEED_NUM_20G;
2984                 break;
2985         case I40E_LINK_SPEED_25GB:
2986                 link->link_speed = ETH_SPEED_NUM_25G;
2987                 break;
2988         case I40E_LINK_SPEED_40GB:
2989                 link->link_speed = ETH_SPEED_NUM_40G;
2990                 break;
2991         default:
2992                 if (link->link_status)
2993                         link->link_speed = ETH_SPEED_NUM_UNKNOWN;
2994                 else
2995                         link->link_speed = ETH_SPEED_NUM_NONE;
2996                 break;
2997         }
2998 }
2999
3000 int
3001 i40e_dev_link_update(struct rte_eth_dev *dev,
3002                      int wait_to_complete)
3003 {
3004         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3005         struct rte_eth_link link;
3006         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3007         int ret;
3008
3009         memset(&link, 0, sizeof(link));
3010
3011         /* i40e uses full duplex only */
3012         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3013         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3014                         ETH_LINK_SPEED_FIXED);
3015
3016         if (!wait_to_complete && !enable_lse)
3017                 update_link_reg(hw, &link);
3018         else
3019                 update_link_aq(hw, &link, enable_lse, wait_to_complete);
3020
3021         if (hw->switch_dev)
3022                 rte_eth_linkstatus_get(hw->switch_dev, &link);
3023
3024         ret = rte_eth_linkstatus_set(dev, &link);
3025         i40e_notify_all_vfs_link_status(dev);
3026
3027         return ret;
3028 }
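
/*
 * Illustrative sketch (not part of the driver): the wait_to_complete
 * argument above corresponds to the two application-level link queries.
 * Assuming a started port:
 */
static void
example_query_link(uint16_t port_id)
{
        struct rte_eth_link link;

        /* wait_to_complete == 1: may poll the AQ for up to ~1 second */
        rte_eth_link_get(port_id, &link);

        /* wait_to_complete == 0: one register/AQ read, returns immediately */
        rte_eth_link_get_nowait(port_id, &link);

        if (link.link_status)
                printf("port %u is up at %u Mbps\n", port_id, link.link_speed);
}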
3029
3030 static void
3031 i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
3032                           uint32_t loreg, bool offset_loaded, uint64_t *offset,
3033                           uint64_t *stat, uint64_t *prev_stat)
3034 {
3035         i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
3036         /* extend the 48-bit counter into 64 bits when it wraps around */
3037         if (offset_loaded) {
3038                 if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
3039                         *stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
3040                 *stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
3041         }
3042         *prev_stat = *stat;
3043 }
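
/*
 * Illustrative sketch (not part of the driver): the hardware byte counters
 * are only 48 bits wide, so the helper above keeps the upper 16 bits alive
 * in software across wrap-arounds. The same logic with the masks spelled
 * out as plain constants:
 */
static uint64_t
example_extend_48bit(uint64_t cur_48, uint64_t prev_64)
{
        uint64_t stat = cur_48;

        /* low 48 bits went backwards -> the hardware counter wrapped */
        if ((prev_64 & 0xFFFFFFFFFFFFULL) > stat)
                stat += 1ULL << 48;

        /* re-apply the software-maintained upper 16 bits */
        stat += prev_64 & 0xFFFF000000000000ULL;
        return stat;
}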
3044
3045 /* Get all the statistics of a VSI */
3046 void
3047 i40e_update_vsi_stats(struct i40e_vsi *vsi)
3048 {
3049         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
3050         struct i40e_eth_stats *nes = &vsi->eth_stats;
3051         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3052         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
3053
3054         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
3055                                   vsi->offset_loaded, &oes->rx_bytes,
3056                                   &nes->rx_bytes, &vsi->prev_rx_bytes);
3057         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
3058                             vsi->offset_loaded, &oes->rx_unicast,
3059                             &nes->rx_unicast);
3060         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
3061                             vsi->offset_loaded, &oes->rx_multicast,
3062                             &nes->rx_multicast);
3063         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
3064                             vsi->offset_loaded, &oes->rx_broadcast,
3065                             &nes->rx_broadcast);
3066         /* exclude CRC bytes */
3067         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
3068                 nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
3069
3070         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
3071                             &oes->rx_discards, &nes->rx_discards);
3072         /* GLV_REPC not supported */
3073         /* GLV_RMPC not supported */
3074         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
3075                             &oes->rx_unknown_protocol,
3076                             &nes->rx_unknown_protocol);
3077         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
3078                                   vsi->offset_loaded, &oes->tx_bytes,
3079                                   &nes->tx_bytes, &vsi->prev_tx_bytes);
3080         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
3081                             vsi->offset_loaded, &oes->tx_unicast,
3082                             &nes->tx_unicast);
3083         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
3084                             vsi->offset_loaded, &oes->tx_multicast,
3085                             &nes->tx_multicast);
3086         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
3087                             vsi->offset_loaded,  &oes->tx_broadcast,
3088                             &nes->tx_broadcast);
3089         /* GLV_TDPC not supported */
3090         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
3091                             &oes->tx_errors, &nes->tx_errors);
3092         vsi->offset_loaded = true;
3093
3094         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
3095                     vsi->vsi_id);
3096         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
3097         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
3098         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
3099         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
3100         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
3101         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3102                     nes->rx_unknown_protocol);
3103         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
3104         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
3105         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
3106         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
3107         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
3108         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
3109         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
3110                     vsi->vsi_id);
3111 }
3112
3113 static void
3114 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
3115 {
3116         unsigned int i;
3117         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3118         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
3119
3120         /* Get Rx/Tx byte counts of internally transferred packets */
3121         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
3122                                   I40E_GLV_GORCL(hw->port),
3123                                   pf->offset_loaded,
3124                                   &pf->internal_stats_offset.rx_bytes,
3125                                   &pf->internal_stats.rx_bytes,
3126                                   &pf->internal_prev_rx_bytes);
3127         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
3128                                   I40E_GLV_GOTCL(hw->port),
3129                                   pf->offset_loaded,
3130                                   &pf->internal_stats_offset.tx_bytes,
3131                                   &pf->internal_stats.tx_bytes,
3132                                   &pf->internal_prev_tx_bytes);
3133         /* Get total internal rx packet count */
3134         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
3135                             I40E_GLV_UPRCL(hw->port),
3136                             pf->offset_loaded,
3137                             &pf->internal_stats_offset.rx_unicast,
3138                             &pf->internal_stats.rx_unicast);
3139         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
3140                             I40E_GLV_MPRCL(hw->port),
3141                             pf->offset_loaded,
3142                             &pf->internal_stats_offset.rx_multicast,
3143                             &pf->internal_stats.rx_multicast);
3144         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
3145                             I40E_GLV_BPRCL(hw->port),
3146                             pf->offset_loaded,
3147                             &pf->internal_stats_offset.rx_broadcast,
3148                             &pf->internal_stats.rx_broadcast);
3149         /* Get total internal tx packet count */
3150         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
3151                             I40E_GLV_UPTCL(hw->port),
3152                             pf->offset_loaded,
3153                             &pf->internal_stats_offset.tx_unicast,
3154                             &pf->internal_stats.tx_unicast);
3155         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
3156                             I40E_GLV_MPTCL(hw->port),
3157                             pf->offset_loaded,
3158                             &pf->internal_stats_offset.tx_multicast,
3159                             &pf->internal_stats.tx_multicast);
3160         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
3161                             I40E_GLV_BPTCL(hw->port),
3162                             pf->offset_loaded,
3163                             &pf->internal_stats_offset.tx_broadcast,
3164                             &pf->internal_stats.tx_broadcast);
3165
3166         /* exclude CRC size */
3167         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
3168                 pf->internal_stats.rx_multicast +
3169                 pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
3170
3171         /* Get statistics of struct i40e_eth_stats */
3172         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
3173                                   I40E_GLPRT_GORCL(hw->port),
3174                                   pf->offset_loaded, &os->eth.rx_bytes,
3175                                   &ns->eth.rx_bytes, &pf->prev_rx_bytes);
3176         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
3177                             I40E_GLPRT_UPRCL(hw->port),
3178                             pf->offset_loaded, &os->eth.rx_unicast,
3179                             &ns->eth.rx_unicast);
3180         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
3181                             I40E_GLPRT_MPRCL(hw->port),
3182                             pf->offset_loaded, &os->eth.rx_multicast,
3183                             &ns->eth.rx_multicast);
3184         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
3185                             I40E_GLPRT_BPRCL(hw->port),
3186                             pf->offset_loaded, &os->eth.rx_broadcast,
3187                             &ns->eth.rx_broadcast);
3188         /* Workaround: CRC size should not be included in byte statistics,
3189          * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
3190          * packet.
3191          */
3192         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3193                 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3194
3195         /* Exclude internal Rx bytes.
3196          * Workaround: it is possible that I40E_GLV_GORC[H/L] is updated
3197          * before I40E_GLPRT_GORC[H/L], so there is a small window that can
3198          * cause a negative value. The same applies to I40E_GLV_UPRC[H/L],
3199          * I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
3200          */
3201         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
3202                 ns->eth.rx_bytes = 0;
3203         else
3204                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
3205
3206         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
3207                 ns->eth.rx_unicast = 0;
3208         else
3209                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
3210
3211         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
3212                 ns->eth.rx_multicast = 0;
3213         else
3214                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
3215
3216         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
3217                 ns->eth.rx_broadcast = 0;
3218         else
3219                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
3220
3221         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
3222                             pf->offset_loaded, &os->eth.rx_discards,
3223                             &ns->eth.rx_discards);
3224         /* GLPRT_REPC not supported */
3225         /* GLPRT_RMPC not supported */
3226         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
3227                             pf->offset_loaded,
3228                             &os->eth.rx_unknown_protocol,
3229                             &ns->eth.rx_unknown_protocol);
3230         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
3231                                   I40E_GLPRT_GOTCL(hw->port),
3232                                   pf->offset_loaded, &os->eth.tx_bytes,
3233                                   &ns->eth.tx_bytes, &pf->prev_tx_bytes);
3234         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
3235                             I40E_GLPRT_UPTCL(hw->port),
3236                             pf->offset_loaded, &os->eth.tx_unicast,
3237                             &ns->eth.tx_unicast);
3238         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
3239                             I40E_GLPRT_MPTCL(hw->port),
3240                             pf->offset_loaded, &os->eth.tx_multicast,
3241                             &ns->eth.tx_multicast);
3242         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3243                             I40E_GLPRT_BPTCL(hw->port),
3244                             pf->offset_loaded, &os->eth.tx_broadcast,
3245                             &ns->eth.tx_broadcast);
3246         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3247                 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
3248
3249         /* Exclude internal Tx bytes.
3250          * Workaround: it is possible that I40E_GLV_GOTC[H/L] is updated
3251          * before I40E_GLPRT_GOTC[H/L], so there is a small window that can
3252          * cause a negative value. The same applies to I40E_GLV_UPTC[H/L],
3253          * I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
3254          */
3255         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
3256                 ns->eth.tx_bytes = 0;
3257         else
3258                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
3259
3260         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
3261                 ns->eth.tx_unicast = 0;
3262         else
3263                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
3264
3265         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
3266                 ns->eth.tx_multicast = 0;
3267         else
3268                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
3269
3270         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
3271                 ns->eth.tx_broadcast = 0;
3272         else
3273                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
3274
3275         /* GLPRT_TEPC not supported */
3276
3277         /* additional port specific stats */
3278         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3279                             pf->offset_loaded, &os->tx_dropped_link_down,
3280                             &ns->tx_dropped_link_down);
3281         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3282                             pf->offset_loaded, &os->crc_errors,
3283                             &ns->crc_errors);
3284         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3285                             pf->offset_loaded, &os->illegal_bytes,
3286                             &ns->illegal_bytes);
3287         /* GLPRT_ERRBC not supported */
3288         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3289                             pf->offset_loaded, &os->mac_local_faults,
3290                             &ns->mac_local_faults);
3291         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3292                             pf->offset_loaded, &os->mac_remote_faults,
3293                             &ns->mac_remote_faults);
3294         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3295                             pf->offset_loaded, &os->rx_length_errors,
3296                             &ns->rx_length_errors);
3297         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3298                             pf->offset_loaded, &os->link_xon_rx,
3299                             &ns->link_xon_rx);
3300         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3301                             pf->offset_loaded, &os->link_xoff_rx,
3302                             &ns->link_xoff_rx);
3303         for (i = 0; i < 8; i++) {
3304                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3305                                     pf->offset_loaded,
3306                                     &os->priority_xon_rx[i],
3307                                     &ns->priority_xon_rx[i]);
3308                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3309                                     pf->offset_loaded,
3310                                     &os->priority_xoff_rx[i],
3311                                     &ns->priority_xoff_rx[i]);
3312         }
3313         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3314                             pf->offset_loaded, &os->link_xon_tx,
3315                             &ns->link_xon_tx);
3316         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3317                             pf->offset_loaded, &os->link_xoff_tx,
3318                             &ns->link_xoff_tx);
3319         for (i = 0; i < 8; i++) {
3320                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3321                                     pf->offset_loaded,
3322                                     &os->priority_xon_tx[i],
3323                                     &ns->priority_xon_tx[i]);
3324                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3325                                     pf->offset_loaded,
3326                                     &os->priority_xoff_tx[i],
3327                                     &ns->priority_xoff_tx[i]);
3328                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3329                                     pf->offset_loaded,
3330                                     &os->priority_xon_2_xoff[i],
3331                                     &ns->priority_xon_2_xoff[i]);
3332         }
3333         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3334                             I40E_GLPRT_PRC64L(hw->port),
3335                             pf->offset_loaded, &os->rx_size_64,
3336                             &ns->rx_size_64);
3337         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3338                             I40E_GLPRT_PRC127L(hw->port),
3339                             pf->offset_loaded, &os->rx_size_127,
3340                             &ns->rx_size_127);
3341         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3342                             I40E_GLPRT_PRC255L(hw->port),
3343                             pf->offset_loaded, &os->rx_size_255,
3344                             &ns->rx_size_255);
3345         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3346                             I40E_GLPRT_PRC511L(hw->port),
3347                             pf->offset_loaded, &os->rx_size_511,
3348                             &ns->rx_size_511);
3349         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3350                             I40E_GLPRT_PRC1023L(hw->port),
3351                             pf->offset_loaded, &os->rx_size_1023,
3352                             &ns->rx_size_1023);
3353         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3354                             I40E_GLPRT_PRC1522L(hw->port),
3355                             pf->offset_loaded, &os->rx_size_1522,
3356                             &ns->rx_size_1522);
3357         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3358                             I40E_GLPRT_PRC9522L(hw->port),
3359                             pf->offset_loaded, &os->rx_size_big,
3360                             &ns->rx_size_big);
3361         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3362                             pf->offset_loaded, &os->rx_undersize,
3363                             &ns->rx_undersize);
3364         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3365                             pf->offset_loaded, &os->rx_fragments,
3366                             &ns->rx_fragments);
3367         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3368                             pf->offset_loaded, &os->rx_oversize,
3369                             &ns->rx_oversize);
3370         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3371                             pf->offset_loaded, &os->rx_jabber,
3372                             &ns->rx_jabber);
3373         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3374                             I40E_GLPRT_PTC64L(hw->port),
3375                             pf->offset_loaded, &os->tx_size_64,
3376                             &ns->tx_size_64);
3377         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3378                             I40E_GLPRT_PTC127L(hw->port),
3379                             pf->offset_loaded, &os->tx_size_127,
3380                             &ns->tx_size_127);
3381         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3382                             I40E_GLPRT_PTC255L(hw->port),
3383                             pf->offset_loaded, &os->tx_size_255,
3384                             &ns->tx_size_255);
3385         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3386                             I40E_GLPRT_PTC511L(hw->port),
3387                             pf->offset_loaded, &os->tx_size_511,
3388                             &ns->tx_size_511);
3389         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3390                             I40E_GLPRT_PTC1023L(hw->port),
3391                             pf->offset_loaded, &os->tx_size_1023,
3392                             &ns->tx_size_1023);
3393         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3394                             I40E_GLPRT_PTC1522L(hw->port),
3395                             pf->offset_loaded, &os->tx_size_1522,
3396                             &ns->tx_size_1522);
3397         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3398                             I40E_GLPRT_PTC9522L(hw->port),
3399                             pf->offset_loaded, &os->tx_size_big,
3400                             &ns->tx_size_big);
3401         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3402                            pf->offset_loaded,
3403                            &os->fd_sb_match, &ns->fd_sb_match);
3404         /* GLPRT_MSPDC not supported */
3405         /* GLPRT_XEC not supported */
3406
3407         pf->offset_loaded = true;
3408
3409         if (pf->main_vsi)
3410                 i40e_update_vsi_stats(pf->main_vsi);
3411 }
3412
3413 /* Get all statistics of a port */
3414 static int
3415 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3416 {
3417         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3418         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3419         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3420         struct i40e_vsi *vsi;
3421         unsigned i;
3422
3423         /* read the registers to refresh the values, then fill the stats struct */
3424         i40e_read_stats_registers(pf, hw);
3425
3426         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3427                         pf->main_vsi->eth_stats.rx_multicast +
3428                         pf->main_vsi->eth_stats.rx_broadcast -
3429                         pf->main_vsi->eth_stats.rx_discards;
3430         stats->opackets = ns->eth.tx_unicast +
3431                         ns->eth.tx_multicast +
3432                         ns->eth.tx_broadcast;
3433         stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3434         stats->obytes   = ns->eth.tx_bytes;
3435         stats->oerrors  = ns->eth.tx_errors +
3436                         pf->main_vsi->eth_stats.tx_errors;
3437
3438         /* Rx Errors */
3439         stats->imissed  = ns->eth.rx_discards +
3440                         pf->main_vsi->eth_stats.rx_discards;
3441         stats->ierrors  = ns->crc_errors +
3442                         ns->rx_length_errors + ns->rx_undersize +
3443                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3444
3445         if (pf->vfs) {
3446                 for (i = 0; i < pf->vf_num; i++) {
3447                         vsi = pf->vfs[i].vsi;
3448                         i40e_update_vsi_stats(vsi);
3449
3450                         stats->ipackets += (vsi->eth_stats.rx_unicast +
3451                                         vsi->eth_stats.rx_multicast +
3452                                         vsi->eth_stats.rx_broadcast -
3453                                         vsi->eth_stats.rx_discards);
3454                         stats->ibytes   += vsi->eth_stats.rx_bytes;
3455                         stats->oerrors  += vsi->eth_stats.tx_errors;
3456                         stats->imissed  += vsi->eth_stats.rx_discards;
3457                 }
3458         }
3459
3460         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3461         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3462         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3463         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3464         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3465         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3466         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3467                     ns->eth.rx_unknown_protocol);
3468         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3469         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3470         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3471         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3472         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3473         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3474
3475         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3476                     ns->tx_dropped_link_down);
3477         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3478         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3479                     ns->illegal_bytes);
3480         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3481         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3482                     ns->mac_local_faults);
3483         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3484                     ns->mac_remote_faults);
3485         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3486                     ns->rx_length_errors);
3487         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3488         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3489         for (i = 0; i < 8; i++) {
3490                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3491                                 i, ns->priority_xon_rx[i]);
3492                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3493                                 i, ns->priority_xoff_rx[i]);
3494         }
3495         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3496         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3497         for (i = 0; i < 8; i++) {
3498                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3499                                 i, ns->priority_xon_tx[i]);
3500                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3501                                 i, ns->priority_xoff_tx[i]);
3502                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3503                                 i, ns->priority_xon_2_xoff[i]);
3504         }
3505         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3506         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3507         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3508         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3509         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3510         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3511         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3512         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3513         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3514         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3515         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3516         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3517         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3518         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3519         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3520         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3521         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3522         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3523         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3524                         ns->mac_short_packet_dropped);
3525         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3526                     ns->checksum_error);
3527         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3528         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3529         return 0;
3530 }
3531
3532 /* Reset the statistics */
3533 static int
3534 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3535 {
3536         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3537         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3538
3539         /* Mark PF and VSI stats to update the offset, aka "reset" */
3540         pf->offset_loaded = false;
3541         if (pf->main_vsi)
3542                 pf->main_vsi->offset_loaded = false;
3543
3544         /* read the stats; current register values are latched as the new offsets */
3545         i40e_read_stats_registers(pf, hw);
3546
3547         return 0;
3548 }
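
/*
 * Illustrative sketch (not part of the driver): because "reset" only
 * re-latches the offset registers, a subsequent read reports deltas since
 * the reset. From the application side:
 */
static void
example_delta_stats(uint16_t port_id)
{
        struct rte_eth_stats stats;

        /* snapshot the current counters into the offsets */
        rte_eth_stats_reset(port_id);

        /* ... traffic runs for a while ... */

        /* reports only what happened since the reset above */
        rte_eth_stats_get(port_id, &stats);
        printf("rx %"PRIu64" pkts, tx %"PRIu64" pkts\n",
               stats.ipackets, stats.opackets);
}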
3549
3550 static uint32_t
3551 i40e_xstats_calc_num(void)
3552 {
3553         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3554                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3555                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3556 }
3557
3558 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3559                                      struct rte_eth_xstat_name *xstats_names,
3560                                      __rte_unused unsigned limit)
3561 {
3562         unsigned count = 0;
3563         unsigned i, prio;
3564
3565         if (xstats_names == NULL)
3566                 return i40e_xstats_calc_num();
3567
3568         /* Note: limit is checked in rte_eth_xstats_get_names() */
3569
3570         /* Get stats from i40e_eth_stats struct */
3571         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3572                 strlcpy(xstats_names[count].name,
3573                         rte_i40e_stats_strings[i].name,
3574                         sizeof(xstats_names[count].name));
3575                 count++;
3576         }
3577
3578         /* Get individual stats from the i40e_hw_port struct */
3579         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3580                 strlcpy(xstats_names[count].name,
3581                         rte_i40e_hw_port_strings[i].name,
3582                         sizeof(xstats_names[count].name));
3583                 count++;
3584         }
3585
3586         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3587                 for (prio = 0; prio < 8; prio++) {
3588                         snprintf(xstats_names[count].name,
3589                                  sizeof(xstats_names[count].name),
3590                                  "rx_priority%u_%s", prio,
3591                                  rte_i40e_rxq_prio_strings[i].name);
3592                         count++;
3593                 }
3594         }
3595
3596         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3597                 for (prio = 0; prio < 8; prio++) {
3598                         snprintf(xstats_names[count].name,
3599                                  sizeof(xstats_names[count].name),
3600                                  "tx_priority%u_%s", prio,
3601                                  rte_i40e_txq_prio_strings[i].name);
3602                         count++;
3603                 }
3604         }
3605         return count;
3606 }
3607
3608 static int
3609 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3610                     unsigned n)
3611 {
3612         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3613         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3614         unsigned i, count, prio;
3615         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3616
3617         count = i40e_xstats_calc_num();
3618         if (n < count)
3619                 return count;
3620
3621         i40e_read_stats_registers(pf, hw);
3622
3623         if (xstats == NULL)
3624                 return 0;
3625
3626         count = 0;
3627
3628         /* Get stats from i40e_eth_stats struct */
3629         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3630                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3631                         rte_i40e_stats_strings[i].offset);
3632                 xstats[count].id = count;
3633                 count++;
3634         }
3635
3636         /* Get individual stats from the i40e_hw_port struct */
3637         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3638                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3639                         rte_i40e_hw_port_strings[i].offset);
3640                 xstats[count].id = count;
3641                 count++;
3642         }
3643
3644         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3645                 for (prio = 0; prio < 8; prio++) {
3646                         xstats[count].value =
3647                                 *(uint64_t *)(((char *)hw_stats) +
3648                                 rte_i40e_rxq_prio_strings[i].offset +
3649                                 (sizeof(uint64_t) * prio));
3650                         xstats[count].id = count;
3651                         count++;
3652                 }
3653         }
3654
3655         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3656                 for (prio = 0; prio < 8; prio++) {
3657                         xstats[count].value =
3658                                 *(uint64_t *)(((char *)hw_stats) +
3659                                 rte_i40e_txq_prio_strings[i].offset +
3660                                 (sizeof(uint64_t) * prio));
3661                         xstats[count].id = count;
3662                         count++;
3663                 }
3664         }
3665
3666         return count;
3667 }
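
/*
 * Illustrative sketch (not part of the driver): the two xstats handlers
 * above implement the usual query-size-then-fetch contract. An application
 * would drive them like this:
 */
static void
example_dump_xstats(uint16_t port_id)
{
        struct rte_eth_xstat *xstats = NULL;
        struct rte_eth_xstat_name *names = NULL;
        int i, n;

        /* a NULL array asks only for the number of entries */
        n = rte_eth_xstats_get(port_id, NULL, 0);
        if (n <= 0)
                return;

        xstats = rte_calloc("xstats", n, sizeof(*xstats), 0);
        names = rte_calloc("xstat_names", n, sizeof(*names), 0);
        if (xstats != NULL && names != NULL &&
            rte_eth_xstats_get(port_id, xstats, n) == n &&
            rte_eth_xstats_get_names(port_id, names, n) == n)
                for (i = 0; i < n; i++)
                        printf("%s: %"PRIu64"\n", names[i].name,
                               xstats[i].value);

        rte_free(xstats);
        rte_free(names);
}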
3668
3669 static int
3670 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3671 {
3672         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3673         u32 full_ver;
3674         u8 ver, patch;
3675         u16 build;
3676         int ret;
3677
3678         full_ver = hw->nvm.oem_ver;
3679         ver = (u8)(full_ver >> 24);
3680         build = (u16)((full_ver >> 8) & 0xffff);
3681         patch = (u8)(full_ver & 0xff);
3682
3683         ret = snprintf(fw_version, fw_size,
3684                  "%d.%d%d 0x%08x %d.%d.%d",
3685                  ((hw->nvm.version >> 12) & 0xf),
3686                  ((hw->nvm.version >> 4) & 0xff),
3687                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3688                  ver, build, patch);
3689
3690         ret += 1; /* add the size of '\0' */
3691         if (fw_size < (u32)ret)
3692                 return ret;
3693         else
3694                 return 0;
3695 }
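
/*
 * Illustrative sketch (not part of the driver): the return convention above
 * (0 on success, otherwise the required buffer size including '\0') is the
 * one rte_eth_dev_fw_version_get() exposes to applications:
 */
static void
example_print_fw_version(uint16_t port_id)
{
        char fw[64];
        int ret;

        ret = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
        if (ret == 0)
                printf("port %u firmware: %s\n", port_id, fw);
        else if (ret > 0)
                printf("need a %d-byte buffer for the version string\n", ret);
}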
3696
3697 /*
3698  * When using NVM 6.01 (for X710/XL710/XXV710) or 3.33 (for X722) or later,
3699  * the Rx data path does not hang if the FW LLDP agent is stopped.
3700  * Return true if LLDP needs to be stopped;
3701  * return false if we cannot disable LLDP without risking an Rx data path block.
3702  */
3703 static bool
3704 i40e_need_stop_lldp(struct rte_eth_dev *dev)
3705 {
3706         double nvm_ver;
3707         char ver_str[64] = {0};
3708         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3709
3710         i40e_fw_version_get(dev, ver_str, 64);
3711         nvm_ver = atof(ver_str);
3712         if ((hw->mac.type == I40E_MAC_X722 ||
3713              hw->mac.type == I40E_MAC_X722_VF) &&
3714              ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3715                 return true;
3716         else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3717                 return true;
3718
3719         return false;
3720 }
3721
3722 static int
3723 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3724 {
3725         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3726         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3727         struct i40e_vsi *vsi = pf->main_vsi;
3728         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3729
3730         dev_info->max_rx_queues = vsi->nb_qps;
3731         dev_info->max_tx_queues = vsi->nb_qps;
3732         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3733         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3734         dev_info->max_mac_addrs = vsi->max_macaddrs;
3735         dev_info->max_vfs = pci_dev->max_vfs;
3736         dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
3737         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3738         dev_info->rx_queue_offload_capa = 0;
3739         dev_info->rx_offload_capa =
3740                 DEV_RX_OFFLOAD_VLAN_STRIP |
3741                 DEV_RX_OFFLOAD_QINQ_STRIP |
3742                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3743                 DEV_RX_OFFLOAD_UDP_CKSUM |
3744                 DEV_RX_OFFLOAD_TCP_CKSUM |
3745                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3746                 DEV_RX_OFFLOAD_KEEP_CRC |
3747                 DEV_RX_OFFLOAD_SCATTER |
3748                 DEV_RX_OFFLOAD_VLAN_EXTEND |
3749                 DEV_RX_OFFLOAD_VLAN_FILTER |
3750                 DEV_RX_OFFLOAD_JUMBO_FRAME |
3751                 DEV_RX_OFFLOAD_RSS_HASH;
3752
3753         dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3754         dev_info->tx_offload_capa =
3755                 DEV_TX_OFFLOAD_VLAN_INSERT |
3756                 DEV_TX_OFFLOAD_QINQ_INSERT |
3757                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3758                 DEV_TX_OFFLOAD_UDP_CKSUM |
3759                 DEV_TX_OFFLOAD_TCP_CKSUM |
3760                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3761                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3762                 DEV_TX_OFFLOAD_TCP_TSO |
3763                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3764                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3765                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3766                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3767                 DEV_TX_OFFLOAD_MULTI_SEGS |
3768                 dev_info->tx_queue_offload_capa;
3769         dev_info->dev_capa =
3770                 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3771                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3772
3773         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3774                                                 sizeof(uint32_t);
3775         dev_info->reta_size = pf->hash_lut_size;
3776         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3777
3778         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3779                 .rx_thresh = {
3780                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3781                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3782                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3783                 },
3784                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3785                 .rx_drop_en = 0,
3786                 .offloads = 0,
3787         };
3788
3789         dev_info->default_txconf = (struct rte_eth_txconf) {
3790                 .tx_thresh = {
3791                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3792                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3793                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3794                 },
3795                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3796                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3797                 .offloads = 0,
3798         };
3799
3800         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3801                 .nb_max = I40E_MAX_RING_DESC,
3802                 .nb_min = I40E_MIN_RING_DESC,
3803                 .nb_align = I40E_ALIGN_RING_DESC,
3804         };
3805
3806         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3807                 .nb_max = I40E_MAX_RING_DESC,
3808                 .nb_min = I40E_MIN_RING_DESC,
3809                 .nb_align = I40E_ALIGN_RING_DESC,
3810                 .nb_seg_max = I40E_TX_MAX_SEG,
3811                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3812         };
3813
3814         if (pf->flags & I40E_FLAG_VMDQ) {
3815                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3816                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3817                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3818                                                 pf->max_nb_vmdq_vsi;
3819                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3820                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3821                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3822         }
3823
3824         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3825                 /* For XL710 */
3826                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3827                 dev_info->default_rxportconf.nb_queues = 2;
3828                 dev_info->default_txportconf.nb_queues = 2;
3829                 if (dev->data->nb_rx_queues == 1)
3830                         dev_info->default_rxportconf.ring_size = 2048;
3831                 else
3832                         dev_info->default_rxportconf.ring_size = 1024;
3833                 if (dev->data->nb_tx_queues == 1)
3834                         dev_info->default_txportconf.ring_size = 1024;
3835                 else
3836                         dev_info->default_txportconf.ring_size = 512;
3837
3838         } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3839                 /* For XXV710 */
3840                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3841                 dev_info->default_rxportconf.nb_queues = 1;
3842                 dev_info->default_txportconf.nb_queues = 1;
3843                 dev_info->default_rxportconf.ring_size = 256;
3844                 dev_info->default_txportconf.ring_size = 256;
3845         } else {
3846                 /* For X710 */
3847                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3848                 dev_info->default_rxportconf.nb_queues = 1;
3849                 dev_info->default_txportconf.nb_queues = 1;
3850                 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3851                         dev_info->default_rxportconf.ring_size = 512;
3852                         dev_info->default_txportconf.ring_size = 256;
3853                 } else {
3854                         dev_info->default_rxportconf.ring_size = 256;
3855                         dev_info->default_txportconf.ring_size = 256;
3856                 }
3857         }
3858         dev_info->default_rxportconf.burst_size = 32;
3859         dev_info->default_txportconf.burst_size = 32;
3860
3861         return 0;
3862 }
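
/*
 * Editor's illustrative sketch (not part of the driver): reading the
 * defaults advertised above through the generic ethdev API and clamping
 * them to the descriptor limits. example_pick_ring_size() and its sizing
 * policy are hypothetical; guarded out so it is not compiled into the PMD.
 */
#if 0
static int
example_pick_ring_size(uint16_t port_id, uint16_t *nb_rxd, uint16_t *nb_txd)
{
        struct rte_eth_dev_info dev_info;
        int ret = rte_eth_dev_info_get(port_id, &dev_info);

        if (ret != 0)
                return ret;
        /* Prefer the PMD's recommended sizes, but respect the minimums */
        *nb_rxd = RTE_MAX(dev_info.default_rxportconf.ring_size,
                          dev_info.rx_desc_lim.nb_min);
        *nb_txd = RTE_MAX(dev_info.default_txportconf.ring_size,
                          dev_info.tx_desc_lim.nb_min);
        return 0;
}
#endif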
3863
3864 static int
3865 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3866 {
3867         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3868         struct i40e_vsi *vsi = pf->main_vsi;
3869         PMD_INIT_FUNC_TRACE();
3870
3871         if (on)
3872                 return i40e_vsi_add_vlan(vsi, vlan_id);
3873         else
3874                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3875 }
3876
3877 static int
3878 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3879                                 enum rte_vlan_type vlan_type,
3880                                 uint16_t tpid, int qinq)
3881 {
3882         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3883         uint64_t reg_r = 0;
3884         uint64_t reg_w = 0;
3885         uint16_t reg_id = 3;
3886         int ret;
3887
3888         if (qinq) {
3889                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3890                         reg_id = 2;
3891         }
3892
3893         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3894                                           &reg_r, NULL);
3895         if (ret != I40E_SUCCESS) {
3896                 PMD_DRV_LOG(ERR,
3897                            "Failed to debug-read I40E_GL_SWT_L2TAGCTRL[%d]",
3898                            reg_id);
3899                 return -EIO;
3900         }
3901         PMD_DRV_LOG(DEBUG,
3902                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3903                     reg_id, reg_r);
3904
3905         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3906         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3907         if (reg_r == reg_w) {
3908                 PMD_DRV_LOG(DEBUG, "No need to write");
3909                 return 0;
3910         }
3911
3912         ret = i40e_aq_debug_write_global_register(hw,
3913                                            I40E_GL_SWT_L2TAGCTRL(reg_id),
3914                                            reg_w, NULL);
3915         if (ret != I40E_SUCCESS) {
3916                 PMD_DRV_LOG(ERR,
3917                             "Failed to debug-write I40E_GL_SWT_L2TAGCTRL[%d]",
3918                             reg_id);
3919                 return -EIO;
3920         }
3921         PMD_DRV_LOG(DEBUG,
3922                     "Global register 0x%08x changed to value 0x%08x",
3923                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3924
3925         return 0;
3926 }
3927
3928 static int
3929 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3930                    enum rte_vlan_type vlan_type,
3931                    uint16_t tpid)
3932 {
3933         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3934         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3935         int qinq = dev->data->dev_conf.rxmode.offloads &
3936                    DEV_RX_OFFLOAD_VLAN_EXTEND;
3937         int ret = 0;
3938
3939         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3940              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3941             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3942                 PMD_DRV_LOG(ERR,
3943                             "Unsupported vlan type.");
3944                 return -EINVAL;
3945         }
3946
3947         if (pf->support_multi_driver) {
3948                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3949                 return -ENOTSUP;
3950         }
3951
3952         /* Support for 802.1ad frames was added in NVM API 1.7 */
3953         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3954                 if (qinq) {
3955                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3956                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3957                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3958                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3959                 } else {
3960                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3961                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3962                 }
3963                 ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3964                 if (ret != I40E_SUCCESS) {
3965                         PMD_DRV_LOG(ERR,
3966                                     "Set switch config failed aq_err: %d",
3967                                     hw->aq.asq_last_status);
3968                         ret = -EIO;
3969                 }
3970         } else
3971                 /* If NVM API < 1.7, keep the register setting */
3972                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3973                                                       tpid, qinq);
3974
3975         return ret;
3976 }
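
/*
 * Editor's illustrative sketch (not part of the driver): an application
 * setting the outer TPID to 0x88A8 for QinQ, which lands in the handler
 * above. DEV_RX_OFFLOAD_VLAN_EXTEND must already be configured, since the
 * handler rejects inner-type updates outside QinQ mode. Guarded out so it
 * is not compiled into the PMD.
 */
#if 0
static int
example_set_qinq_outer_tpid(uint16_t port_id)
{
        return rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER,
                                               0x88A8);
}
#endif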
3977
3978 /* Configure outer vlan stripping on or off in QinQ mode */
3979 static int
3980 i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
3981 {
3982         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3983         int ret = I40E_SUCCESS;
3984         uint32_t reg;
3985
3986         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
3987                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
3988                 return -EINVAL;
3989         }
3990
3991         /* Configure for outer VLAN RX stripping */
3992         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
3993
3994         if (on)
3995                 reg |= I40E_VSI_TSR_QINQ_STRIP;
3996         else
3997                 reg &= ~I40E_VSI_TSR_QINQ_STRIP;
3998
3999         ret = i40e_aq_debug_write_register(hw,
4000                                                    I40E_VSI_TSR(vsi->vsi_id),
4001                                                    reg, NULL);
4002         if (ret < 0) {
4003                 PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
4004                                     vsi->vsi_id);
4005                 return I40E_ERR_CONFIG;
4006         }
4007
4008         return ret;
4009 }
4010
4011 static int
4012 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4013 {
4014         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4015         struct i40e_vsi *vsi = pf->main_vsi;
4016         struct rte_eth_rxmode *rxmode;
4017
4018         rxmode = &dev->data->dev_conf.rxmode;
4019         if (mask & ETH_VLAN_FILTER_MASK) {
4020                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4021                         i40e_vsi_config_vlan_filter(vsi, TRUE);
4022                 else
4023                         i40e_vsi_config_vlan_filter(vsi, FALSE);
4024         }
4025
4026         if (mask & ETH_VLAN_STRIP_MASK) {
4027                 /* Enable or disable VLAN stripping */
4028                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4029                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
4030                 else
4031                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
4032         }
4033
4034         if (mask & ETH_VLAN_EXTEND_MASK) {
4035                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
4036                         i40e_vsi_config_double_vlan(vsi, TRUE);
4037                         /* Set global registers with default ethertype. */
4038                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
4039                                            RTE_ETHER_TYPE_VLAN);
4040                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
4041                                            RTE_ETHER_TYPE_VLAN);
4042                 }
4043                 else
4044                         i40e_vsi_config_double_vlan(vsi, FALSE);
4045         }
4046
4047         if (mask & ETH_QINQ_STRIP_MASK) {
4048                 /* Enable or disable outer VLAN stripping */
4049                 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
4050                         i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
4051                 else
4052                         i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
4053         }
4054
4055         return 0;
4056 }
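
/*
 * Editor's illustrative sketch (not part of the driver): toggling VLAN
 * stripping at runtime. rte_eth_dev_set_vlan_offload() works out which
 * settings changed and calls the mask-based handler above. Guarded out so
 * it is not compiled into the PMD.
 */
#if 0
static int
example_enable_vlan_strip(uint16_t port_id)
{
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
                return mask;
        mask |= ETH_VLAN_STRIP_OFFLOAD;
        return rte_eth_dev_set_vlan_offload(port_id, mask);
}
#endif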
4057
4058 static void
4059 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
4060                           __rte_unused uint16_t queue,
4061                           __rte_unused int on)
4062 {
4063         PMD_INIT_FUNC_TRACE();
4064 }
4065
4066 static int
4067 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4068 {
4069         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4070         struct i40e_vsi *vsi = pf->main_vsi;
4071         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
4072         struct i40e_vsi_vlan_pvid_info info;
4073
4074         memset(&info, 0, sizeof(info));
4075         info.on = on;
4076         if (info.on)
4077                 info.config.pvid = pvid;
4078         else {
4079                 info.config.reject.tagged =
4080                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
4081                 info.config.reject.untagged =
4082                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
4083         }
4084
4085         return i40e_vsi_vlan_pvid_set(vsi, &info);
4086 }
4087
4088 static int
4089 i40e_dev_led_on(struct rte_eth_dev *dev)
4090 {
4091         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4092         uint32_t mode = i40e_led_get(hw);
4093
4094         if (mode == 0)
4095                 i40e_led_set(hw, 0xf, true); /* 0xf means led always true */
4096
4097         return 0;
4098 }
4099
4100 static int
4101 i40e_dev_led_off(struct rte_eth_dev *dev)
4102 {
4103         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4104         uint32_t mode = i40e_led_get(hw);
4105
4106         if (mode != 0)
4107                 i40e_led_set(hw, 0, false);
4108
4109         return 0;
4110 }
4111
4112 static int
4113 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4114 {
4115         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4116         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4117
4118         fc_conf->pause_time = pf->fc_conf.pause_time;
4119
4120         /* Read from the registers, in case they were modified by another port */
4121         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
4122                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
4123         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
4124                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
4125
4126         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
4127         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
4128
4129         /* Return the current mode according to the actual setting */
4130         switch (hw->fc.current_mode) {
4131         case I40E_FC_FULL:
4132                 fc_conf->mode = RTE_FC_FULL;
4133                 break;
4134         case I40E_FC_TX_PAUSE:
4135                 fc_conf->mode = RTE_FC_TX_PAUSE;
4136                 break;
4137         case I40E_FC_RX_PAUSE:
4138                 fc_conf->mode = RTE_FC_RX_PAUSE;
4139                 break;
4140         case I40E_FC_NONE:
4141         default:
4142                 fc_conf->mode = RTE_FC_NONE;
4143         }
4144
4145         return 0;
4146 }
4147
4148 static int
4149 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4150 {
4151         uint32_t mflcn_reg, fctrl_reg, reg;
4152         uint32_t max_high_water;
4153         uint8_t i, aq_failure;
4154         int err;
4155         struct i40e_hw *hw;
4156         struct i40e_pf *pf;
4157         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
4158                 [RTE_FC_NONE] = I40E_FC_NONE,
4159                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
4160                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
4161                 [RTE_FC_FULL] = I40E_FC_FULL
4162         };
4163
4164         /* The high_water field in struct rte_eth_fc_conf uses kilobyte units */
4165
4166         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4167         if ((fc_conf->high_water > max_high_water) ||
4168                         (fc_conf->high_water < fc_conf->low_water)) {
4169                 PMD_INIT_LOG(ERR,
4170                         "Invalid high/low water setup value in KB; high_water must be <= %d.",
4171                         max_high_water);
4172                 return -EINVAL;
4173         }
4174
4175         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4176         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4177         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4178
4179         pf->fc_conf.pause_time = fc_conf->pause_time;
4180         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4181         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4182
4183         PMD_INIT_FUNC_TRACE();
4184
4185         /* All the link-flow-control-related enable/disable register
4186          * configuration is handled by the firmware
4187          */
4188         err = i40e_set_fc(hw, &aq_failure, true);
4189         if (err < 0)
4190                 return -ENOSYS;
4191
4192         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4193                 /* Configure flow control refresh threshold,
4194                  * the value for stat_tx_pause_refresh_timer[8]
4195                  * is used for global pause operation.
4196                  */
4197
4198                 I40E_WRITE_REG(hw,
4199                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4200                                pf->fc_conf.pause_time);
4201
4202                 /* Configure the timer value included in transmitted
4203                  * pause frames;
4204                  * the value for stat_tx_pause_quanta[8] is used for
4205                  * global pause operation.
4206                  */
4207                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4208                                pf->fc_conf.pause_time);
4209
4210                 fctrl_reg = I40E_READ_REG(hw,
4211                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4212
4213                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4214                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4215                 else
4216                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4217
4218                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4219                                fctrl_reg);
4220         } else {
4221                 /* Configure pause time (2 TCs per register) */
4222                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4223                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4224                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4225
4226                 /* Configure flow control refresh threshold value */
4227                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4228                                pf->fc_conf.pause_time / 2);
4229
4230                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4231
4232                 /* Set or clear MFLCN.PMCF & MFLCN.DPF bits
4233                  * depending on the configuration
4234                  */
4235                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
4236                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4237                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4238                 } else {
4239                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4240                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4241                 }
4242
4243                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4244         }
4245
4246         if (!pf->support_multi_driver) {
4247                 /* Configure water marks based on both packets and bytes */
4248                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4249                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4250                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4251                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4252                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4253                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4254                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4255                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4256                                   << I40E_KILOSHIFT);
4257                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4258                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4259                                    << I40E_KILOSHIFT);
4260         } else {
4261                 PMD_DRV_LOG(ERR,
4262                             "Water mark configuration is not supported.");
4263         }
4264
4265         I40E_WRITE_FLUSH(hw);
4266
4267         return 0;
4268 }
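
/*
 * Editor's illustrative sketch (not part of the driver): a read-modify-write
 * of the link flow control settings from an application, exercising the
 * getter/setter pair above. Water marks are in kilobytes, as noted above.
 * Guarded out so it is not compiled into the PMD.
 */
#if 0
static int
example_enable_full_flow_ctrl(uint16_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);

        if (ret != 0)
                return ret;
        fc_conf.mode = RTE_FC_FULL;
        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
#endif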
4269
4270 static int
4271 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
4272                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
4273 {
4274         PMD_INIT_FUNC_TRACE();
4275
4276         return -ENOSYS;
4277 }
4278
4279 /* Add a MAC address, and update filters */
4280 static int
4281 i40e_macaddr_add(struct rte_eth_dev *dev,
4282                  struct rte_ether_addr *mac_addr,
4283                  __rte_unused uint32_t index,
4284                  uint32_t pool)
4285 {
4286         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4287         struct i40e_mac_filter_info mac_filter;
4288         struct i40e_vsi *vsi;
4289         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4290         int ret;
4291
4292         /* If VMDQ not enabled or configured, return */
4293         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4294                           !pf->nb_cfg_vmdq_vsi)) {
4295                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4296                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4297                         pool);
4298                 return -ENOTSUP;
4299         }
4300
4301         if (pool > pf->nb_cfg_vmdq_vsi) {
4302                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4303                                 pool, pf->nb_cfg_vmdq_vsi);
4304                 return -EINVAL;
4305         }
4306
4307         rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4308         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4309                 mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
4310         else
4311                 mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
4312
4313         if (pool == 0)
4314                 vsi = pf->main_vsi;
4315         else
4316                 vsi = pf->vmdq[pool - 1].vsi;
4317
4318         ret = i40e_vsi_add_mac(vsi, &mac_filter);
4319         if (ret != I40E_SUCCESS) {
4320                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4321                 return -ENODEV;
4322         }
4323         return 0;
4324 }
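
/*
 * Editor's illustrative sketch (not part of the driver): adding a unicast
 * MAC filter to the main VSI (pool 0), which reaches the handler above.
 * The locally administered address shown is an arbitrary example. Guarded
 * out so it is not compiled into the PMD.
 */
#if 0
static int
example_add_mac_filter(uint16_t port_id)
{
        struct rte_ether_addr addr = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
        };

        return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
}
#endif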
4325
4326 /* Remove a MAC address, and update filters */
4327 static void
4328 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4329 {
4330         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4331         struct i40e_vsi *vsi;
4332         struct rte_eth_dev_data *data = dev->data;
4333         struct rte_ether_addr *macaddr;
4334         int ret;
4335         uint32_t i;
4336         uint64_t pool_sel;
4337
4338         macaddr = &(data->mac_addrs[index]);
4339
4340         pool_sel = dev->data->mac_pool_sel[index];
4341
4342         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4343                 if (pool_sel & (1ULL << i)) {
4344                         if (i == 0)
4345                                 vsi = pf->main_vsi;
4346                         else {
4347                                 /* No VMDQ pool enabled or configured */
4348                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
4349                                         (i > pf->nb_cfg_vmdq_vsi)) {
4350                                         PMD_DRV_LOG(ERR,
4351                                                 "No VMDQ pool enabled/configured");
4352                                         return;
4353                                 }
4354                                 vsi = pf->vmdq[i - 1].vsi;
4355                         }
4356                         ret = i40e_vsi_delete_mac(vsi, macaddr);
4357
4358                         if (ret) {
4359                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4360                                 return;
4361                         }
4362                 }
4363         }
4364 }
4365
4366 static int
4367 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4368 {
4369         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4370         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4371         uint32_t reg;
4372         int ret;
4373
4374         if (!lut)
4375                 return -EINVAL;
4376
4377         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4378                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4379                                           vsi->type != I40E_VSI_SRIOV,
4380                                           lut, lut_size);
4381                 if (ret) {
4382                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4383                         return ret;
4384                 }
4385         } else {
4386                 uint32_t *lut_dw = (uint32_t *)lut;
4387                 uint16_t i, lut_size_dw = lut_size / 4;
4388
4389                 if (vsi->type == I40E_VSI_SRIOV) {
4390                         for (i = 0; i < lut_size_dw; i++) {
4391                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4392                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4393                         }
4394                 } else {
4395                         for (i = 0; i < lut_size_dw; i++)
4396                                 lut_dw[i] = I40E_READ_REG(hw,
4397                                                           I40E_PFQF_HLUT(i));
4398                 }
4399         }
4400
4401         return 0;
4402 }
4403
4404 int
4405 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4406 {
4407         struct i40e_pf *pf;
4408         struct i40e_hw *hw;
4409
4410         if (!vsi || !lut)
4411                 return -EINVAL;
4412
4413         pf = I40E_VSI_TO_PF(vsi);
4414         hw = I40E_VSI_TO_HW(vsi);
4415
4416         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4417                 enum i40e_status_code status;
4418
4419                 status = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4420                                              vsi->type != I40E_VSI_SRIOV,
4421                                              lut, lut_size);
4422                 if (status) {
4423                         PMD_DRV_LOG(ERR,
4424                                     "Failed to update RSS lookup table, error status: %d",
4425                                     status);
4426                         return -EIO;
4427                 }
4428         } else {
4429                 uint32_t *lut_dw = (uint32_t *)lut;
4430                 uint16_t i, lut_size_dw = lut_size / 4;
4431
4432                 if (vsi->type == I40E_VSI_SRIOV) {
4433                         for (i = 0; i < lut_size_dw; i++)
4434                                 I40E_WRITE_REG(
4435                                         hw,
4436                                         I40E_VFQF_HLUT1(i, vsi->user_param),
4437                                         lut_dw[i]);
4438                 } else {
4439                         for (i = 0; i < lut_size_dw; i++)
4440                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4441                                                lut_dw[i]);
4442                 }
4443                 I40E_WRITE_FLUSH(hw);
4444         }
4445
4446         return 0;
4447 }
4448
4449 static int
4450 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4451                          struct rte_eth_rss_reta_entry64 *reta_conf,
4452                          uint16_t reta_size)
4453 {
4454         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4455         uint16_t i, lut_size = pf->hash_lut_size;
4456         uint16_t idx, shift;
4457         uint8_t *lut;
4458         int ret;
4459
4460         if (reta_size != lut_size ||
4461                 reta_size > ETH_RSS_RETA_SIZE_512) {
4462                 PMD_DRV_LOG(ERR,
4463                 "The size of the configured hash lookup table (%d) doesn't match what the hardware supports (%d)",
4464                         reta_size, lut_size);
4465                 return -EINVAL;
4466         }
4467
4468         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4469         if (!lut) {
4470                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4471                 return -ENOMEM;
4472         }
4473         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4474         if (ret)
4475                 goto out;
4476         for (i = 0; i < reta_size; i++) {
4477                 idx = i / RTE_RETA_GROUP_SIZE;
4478                 shift = i % RTE_RETA_GROUP_SIZE;
4479                 if (reta_conf[idx].mask & (1ULL << shift))
4480                         lut[i] = reta_conf[idx].reta[shift];
4481         }
4482         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4483
4484         pf->adapter->rss_reta_updated = 1;
4485
4486 out:
4487         rte_free(lut);
4488
4489         return ret;
4490 }
4491
4492 static int
4493 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4494                         struct rte_eth_rss_reta_entry64 *reta_conf,
4495                         uint16_t reta_size)
4496 {
4497         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4498         uint16_t i, lut_size = pf->hash_lut_size;
4499         uint16_t idx, shift;
4500         uint8_t *lut;
4501         int ret;
4502
4503         if (reta_size != lut_size ||
4504                 reta_size > ETH_RSS_RETA_SIZE_512) {
4505                 PMD_DRV_LOG(ERR,
4506                 "The size of the configured hash lookup table (%d) doesn't match what the hardware supports (%d)",
4507                         reta_size, lut_size);
4508                 return -EINVAL;
4509         }
4510
4511         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4512         if (!lut) {
4513                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4514                 return -ENOMEM;
4515         }
4516
4517         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4518         if (ret)
4519                 goto out;
4520         for (i = 0; i < reta_size; i++) {
4521                 idx = i / RTE_RETA_GROUP_SIZE;
4522                 shift = i % RTE_RETA_GROUP_SIZE;
4523                 if (reta_conf[idx].mask & (1ULL << shift))
4524                         reta_conf[idx].reta[shift] = lut[i];
4525         }
4526
4527 out:
4528         rte_free(lut);
4529
4530         return ret;
4531 }
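
/*
 * Editor's illustrative sketch (not part of the driver): the idx/shift
 * packing used by the two RETA handlers above. Entry i of the table lives
 * in group i / RTE_RETA_GROUP_SIZE at bit i % RTE_RETA_GROUP_SIZE, and only
 * entries whose mask bit is set are touched. reta_size must equal the value
 * reported in dev_info.reta_size. Guarded out so it is not compiled into
 * the PMD.
 */
#if 0
static int
example_spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
                                                  RTE_RETA_GROUP_SIZE];
        uint16_t i;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < reta_size; i++) {
                uint16_t idx = i / RTE_RETA_GROUP_SIZE;
                uint16_t shift = i % RTE_RETA_GROUP_SIZE;

                reta_conf[idx].mask |= 1ULL << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
#endif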
4532
4533 /**
4534  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4535  * @hw:   pointer to the HW structure
4536  * @mem:  pointer to mem struct to fill out
4537  * @size: size of memory requested
4538  * @alignment: what to align the allocation to
4539  **/
4540 enum i40e_status_code
4541 i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
4542                         struct i40e_dma_mem *mem,
4543                         u64 size,
4544                         u32 alignment)
4545 {
4546         const struct rte_memzone *mz = NULL;
4547         char z_name[RTE_MEMZONE_NAMESIZE];
4548
4549         if (!mem)
4550                 return I40E_ERR_PARAM;
4551
4552         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4553         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4554                         RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4555         if (!mz)
4556                 return I40E_ERR_NO_MEMORY;
4557
4558         mem->size = size;
4559         mem->va = mz->addr;
4560         mem->pa = mz->iova;
4561         mem->zone = (const void *)mz;
4562         PMD_DRV_LOG(DEBUG,
4563                 "memzone %s allocated with physical address: %"PRIu64,
4564                 mz->name, mem->pa);
4565
4566         return I40E_SUCCESS;
4567 }
4568
4569 /**
4570  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4571  * @hw:   pointer to the HW structure
4572  * @mem:  ptr to mem struct to free
4573  **/
4574 enum i40e_status_code
4575 i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
4576                     struct i40e_dma_mem *mem)
4577 {
4578         if (!mem)
4579                 return I40E_ERR_PARAM;
4580
4581         PMD_DRV_LOG(DEBUG,
4582                 "memzone %s to be freed with physical address: %"PRIu64,
4583                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4584         rte_memzone_free((const struct rte_memzone *)mem->zone);
4585         mem->zone = NULL;
4586         mem->va = NULL;
4587         mem->pa = (u64)0;
4588
4589         return I40E_SUCCESS;
4590 }
4591
4592 /**
4593  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4594  * @hw:   pointer to the HW structure
4595  * @mem:  pointer to mem struct to fill out
4596  * @size: size of memory requested
4597  **/
4598 enum i40e_status_code
4599 i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
4600                          struct i40e_virt_mem *mem,
4601                          u32 size)
4602 {
4603         if (!mem)
4604                 return I40E_ERR_PARAM;
4605
4606         mem->size = size;
4607         mem->va = rte_zmalloc("i40e", size, 0);
4608
4609         if (mem->va)
4610                 return I40E_SUCCESS;
4611         else
4612                 return I40E_ERR_NO_MEMORY;
4613 }
4614
4615 /**
4616  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4617  * @hw:   pointer to the HW structure
4618  * @mem:  pointer to mem struct to free
4619  **/
4620 enum i40e_status_code
4621 i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
4622                      struct i40e_virt_mem *mem)
4623 {
4624         if (!mem)
4625                 return I40E_ERR_PARAM;
4626
4627         rte_free(mem->va);
4628         mem->va = NULL;
4629
4630         return I40E_SUCCESS;
4631 }
4632
4633 void
4634 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4635 {
4636         rte_spinlock_init(&sp->spinlock);
4637 }
4638
4639 void
4640 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4641 {
4642         rte_spinlock_lock(&sp->spinlock);
4643 }
4644
4645 void
4646 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4647 {
4648         rte_spinlock_unlock(&sp->spinlock);
4649 }
4650
4651 void
4652 i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
4653 {
4654         return;
4655 }
4656
4657 /**
4658  * Get the hardware capabilities, which will be parsed
4659  * and saved into struct i40e_hw.
4660  */
4661 static int
4662 i40e_get_cap(struct i40e_hw *hw)
4663 {
4664         struct i40e_aqc_list_capabilities_element_resp *buf;
4665         uint16_t len, size = 0;
4666         int ret;
4667
4668         /* Allocate a buffer large enough to temporarily hold the response data */
4669         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4670                                                 I40E_MAX_CAP_ELE_NUM;
4671         buf = rte_zmalloc("i40e", len, 0);
4672         if (!buf) {
4673                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4674                 return I40E_ERR_NO_MEMORY;
4675         }
4676
4677         /* Get and parse the capabilities, then save them to hw */
4678         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4679                         i40e_aqc_opc_list_func_capabilities, NULL);
4680         if (ret != I40E_SUCCESS)
4681                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4682
4683         /* Free the temporary buffer after being used */
4684         rte_free(buf);
4685
4686         return ret;
4687 }
4688
4689 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4690
4691 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4692                 const char *value,
4693                 void *opaque)
4694 {
4695         struct i40e_pf *pf;
4696         unsigned long num;
4697         char *end;
4698
4699         pf = (struct i40e_pf *)opaque;
4700         RTE_SET_USED(key);
4701
4702         errno = 0;
4703         num = strtoul(value, &end, 0);
4704         if (errno != 0 || end == value || *end != 0) {
4705                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s; keeping "
4706                             "the current value = %hu", value, pf->vf_nb_qp_max);
4707                 return -(EINVAL);
4708         }
4709
4710         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4711                 pf->vf_nb_qp_max = (uint16_t)num;
4712         else
4713                 /* Return 0 so a later valid instance of this argument still works */
4714                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu; it must be "
4715                             "a power of 2 and no greater than 16. Keeping the "
4716                             "current value = %hu", num, pf->vf_nb_qp_max);
4717
4718         return 0;
4719 }
4720
4721 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4722 {
4723         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4724         struct rte_kvargs *kvlist;
4725         int kvargs_count;
4726
4727         /* set default queue number per VF as 4 */
4728         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4729
4730         if (dev->device->devargs == NULL)
4731                 return 0;
4732
4733         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4734         if (kvlist == NULL)
4735                 return -(EINVAL);
4736
4737         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4738         if (!kvargs_count) {
4739                 rte_kvargs_free(kvlist);
4740                 return 0;
4741         }
4742
4743         if (kvargs_count > 1)
4744                 PMD_DRV_LOG(WARNING, "Argument \"%s\" given more than once; only "
4745                             "the first invalid or the last valid one is used!",
4746                             ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4747
4748         rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4749                            i40e_pf_parse_vf_queue_number_handler, pf);
4750
4751         rte_kvargs_free(kvlist);
4752
4753         return 0;
4754 }
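
/*
 * Editor's note: the devargs hook above is driven from the EAL command
 * line, for example (the PCI address is a placeholder):
 *
 *     -a 0000:02:00.0,queue-num-per-vf=8
 *
 * Accepted values are 1, 2, 4, 8 and 16; anything else logs a warning and
 * the default of 4 queues per VF is kept.
 */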
4755
4756 static int
4757 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4758 {
4759         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4760         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4761         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4762         uint16_t qp_count = 0, vsi_count = 0;
4763
4764         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4765                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4766                 return -EINVAL;
4767         }
4768
4769         i40e_pf_config_vf_rxq_number(dev);
4770
4771         /* Add the parameter init for LFC */
4772         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4773         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4774         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4775
4776         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4777         pf->max_num_vsi = hw->func_caps.num_vsis;
4778         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4779         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4780
4781         /* FDir queue/VSI allocation */
4782         pf->fdir_qp_offset = 0;
4783         if (hw->func_caps.fd) {
4784                 pf->flags |= I40E_FLAG_FDIR;
4785                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4786         } else {
4787                 pf->fdir_nb_qps = 0;
4788         }
4789         qp_count += pf->fdir_nb_qps;
4790         vsi_count += 1;
4791
4792         /* LAN queue/VSI allocation */
4793         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4794         if (!hw->func_caps.rss) {
4795                 pf->lan_nb_qps = 1;
4796         } else {
4797                 pf->flags |= I40E_FLAG_RSS;
4798                 if (hw->mac.type == I40E_MAC_X722)
4799                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4800                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4801         }
4802         qp_count += pf->lan_nb_qps;
4803         vsi_count += 1;
4804
4805         /* VF queue/VSI allocation */
4806         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4807         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4808                 pf->flags |= I40E_FLAG_SRIOV;
4809                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4810                 pf->vf_num = pci_dev->max_vfs;
4811                 PMD_DRV_LOG(DEBUG,
4812                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4813                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4814         } else {
4815                 pf->vf_nb_qps = 0;
4816                 pf->vf_num = 0;
4817         }
4818         qp_count += pf->vf_nb_qps * pf->vf_num;
4819         vsi_count += pf->vf_num;
4820
4821         /* VMDq queue/VSI allocation */
4822         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4823         pf->vmdq_nb_qps = 0;
4824         pf->max_nb_vmdq_vsi = 0;
4825         if (hw->func_caps.vmdq) {
4826                 if (qp_count < hw->func_caps.num_tx_qp &&
4827                         vsi_count < hw->func_caps.num_vsis) {
4828                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4829                                 qp_count) / pf->vmdq_nb_qp_max;
4830
4831                         /* Limit the maximum number of VMDq vsi to the maximum
4832                          * ethdev can support
4833                          */
4834                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4835                                 hw->func_caps.num_vsis - vsi_count);
4836                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4837                                 ETH_64_POOLS);
4838                         if (pf->max_nb_vmdq_vsi) {
4839                                 pf->flags |= I40E_FLAG_VMDQ;
4840                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4841                                 PMD_DRV_LOG(DEBUG,
4842                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4843                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4844                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4845                         } else {
4846                                 PMD_DRV_LOG(INFO,
4847                                         "Not enough queues left for VMDq");
4848                         }
4849                 } else {
4850                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4851                 }
4852         }
4853         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4854         vsi_count += pf->max_nb_vmdq_vsi;
4855
4856         if (hw->func_caps.dcb)
4857                 pf->flags |= I40E_FLAG_DCB;
4858
4859         if (qp_count > hw->func_caps.num_tx_qp) {
4860                 PMD_DRV_LOG(ERR,
4861                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4862                         qp_count, hw->func_caps.num_tx_qp);
4863                 return -EINVAL;
4864         }
4865         if (vsi_count > hw->func_caps.num_vsis) {
4866                 PMD_DRV_LOG(ERR,
4867                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4868                         vsi_count, hw->func_caps.num_vsis);
4869                 return -EINVAL;
4870         }
4871
4872         return 0;
4873 }
4874
4875 static int
4876 i40e_pf_get_switch_config(struct i40e_pf *pf)
4877 {
4878         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4879         struct i40e_aqc_get_switch_config_resp *switch_config;
4880         struct i40e_aqc_switch_config_element_resp *element;
4881         uint16_t start_seid = 0, num_reported;
4882         int ret;
4883
4884         switch_config = (struct i40e_aqc_get_switch_config_resp *)
4885                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4886         if (!switch_config) {
4887                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4888                 return -ENOMEM;
4889         }
4890
4891         /* Get the switch configurations */
4892         ret = i40e_aq_get_switch_config(hw, switch_config,
4893                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4894         if (ret != I40E_SUCCESS) {
4895                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4896                 goto fail;
4897         }
4898         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4899         if (num_reported != 1) { /* The number should be 1 */
4900                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4901                 goto fail;
4902         }
4903
4904         /* Parse the switch configuration elements */
4905         element = &(switch_config->element[0]);
4906         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4907                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4908                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4909         } else
4910                 PMD_DRV_LOG(INFO, "Unknown element type");
4911
4912 fail:
4913         rte_free(switch_config);
4914
4915         return ret;
4916 }
4917
4918 static int
4919 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4920                         uint32_t num)
4921 {
4922         struct pool_entry *entry;
4923
4924         if (pool == NULL || num == 0)
4925                 return -EINVAL;
4926
4927         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4928         if (entry == NULL) {
4929                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4930                 return -ENOMEM;
4931         }
4932
4933         /* Initialize the queue heap */
4934         pool->num_free = num;
4935         pool->num_alloc = 0;
4936         pool->base = base;
4937         LIST_INIT(&pool->alloc_list);
4938         LIST_INIT(&pool->free_list);
4939
4940         /* Initialize the element */
4941         entry->base = 0;
4942         entry->len = num;
4943
4944         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4945         return 0;
4946 }
4947
4948 static void
4949 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4950 {
4951         struct pool_entry *entry, *next_entry;
4952
4953         if (pool == NULL)
4954                 return;
4955
4956         for (entry = LIST_FIRST(&pool->alloc_list);
4957                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4958                         entry = next_entry) {
4959                 LIST_REMOVE(entry, next);
4960                 rte_free(entry);
4961         }
4962
4963         for (entry = LIST_FIRST(&pool->free_list);
4964                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4965                         entry = next_entry) {
4966                 LIST_REMOVE(entry, next);
4967                 rte_free(entry);
4968         }
4969
4970         pool->num_free = 0;
4971         pool->num_alloc = 0;
4972         pool->base = 0;
4973         LIST_INIT(&pool->alloc_list);
4974         LIST_INIT(&pool->free_list);
4975 }
4976
4977 static int
4978 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4979                        uint32_t base)
4980 {
4981         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4982         uint32_t pool_offset;
4983         uint16_t len;
4984         int insert;
4985
4986         if (pool == NULL) {
4987                 PMD_DRV_LOG(ERR, "Invalid parameter");
4988                 return -EINVAL;
4989         }
4990
4991         pool_offset = base - pool->base;
4992         /* Lookup in alloc list */
4993         LIST_FOREACH(entry, &pool->alloc_list, next) {
4994                 if (entry->base == pool_offset) {
4995                         valid_entry = entry;
4996                         LIST_REMOVE(entry, next);
4997                         break;
4998                 }
4999         }
5000
5001         /* Not found, return */
5002         if (valid_entry == NULL) {
5003                 PMD_DRV_LOG(ERR, "Failed to find entry");
5004                 return -EINVAL;
5005         }
5006
5007         /**
5008          * Found it; move it to the free list and try to merge.
5009          * To make merging easier, the free list is kept sorted by base.
5010          * Find the adjacent prev and next entries.
5011          */
5012         prev = next = NULL;
5013         LIST_FOREACH(entry, &pool->free_list, next) {
5014                 if (entry->base > valid_entry->base) {
5015                         next = entry;
5016                         break;
5017                 }
5018                 prev = entry;
5019         }
5020
5021         insert = 0;
5022         len = valid_entry->len;
5023         /* Try to merge with the next one */
5024         if (next != NULL) {
5025                 /* Merge with next one */
5026                 if (valid_entry->base + len == next->base) {
5027                         next->base = valid_entry->base;
5028                         next->len += len;
5029                         rte_free(valid_entry);
5030                         valid_entry = next;
5031                         insert = 1;
5032                 }
5033         }
5034
5035         if (prev != NULL) {
5036                 /* Merge with previous one */
5037                 if (prev->base + prev->len == valid_entry->base) {
5038                         prev->len += valid_entry->len;
5039                         /* If it already merged with the next one, remove that node */
5040                         if (insert == 1) {
5041                                 LIST_REMOVE(valid_entry, next);
5042                                 rte_free(valid_entry);
5043                                 valid_entry = NULL;
5044                         } else {
5045                                 rte_free(valid_entry);
5046                                 valid_entry = NULL;
5047                                 insert = 1;
5048                         }
5049                 }
5050         }
5051
5052         /* No entry was merged; insert it */
5053         if (insert == 0) {
5054                 if (prev != NULL)
5055                         LIST_INSERT_AFTER(prev, valid_entry, next);
5056                 else if (next != NULL)
5057                         LIST_INSERT_BEFORE(next, valid_entry, next);
5058                 else /* It's empty list, insert to head */
5059                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5060         }
5061
5062         pool->num_free += len;
5063         pool->num_alloc -= len;
5064
5065         return 0;
5066 }
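
/*
 * Editor's note: worked example of the merge above. With free entries
 * [0,8) and [16,24) and an allocated block of base 8 and length 8, freeing
 * that block first merges forward into [8,24) and then backward into a
 * single [0,24) entry, so the free list stays sorted and defragmented.
 */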
5067
5068 static int
5069 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5070                        uint16_t num)
5071 {
5072         struct pool_entry *entry, *valid_entry;
5073
5074         if (pool == NULL || num == 0) {
5075                 PMD_DRV_LOG(ERR, "Invalid parameter");
5076                 return -EINVAL;
5077         }
5078
5079         if (pool->num_free < num) {
5080                 PMD_DRV_LOG(ERR, "Insufficient resources: requested %u, available %u",
5081                             num, pool->num_free);
5082                 return -ENOMEM;
5083         }
5084
5085         valid_entry = NULL;
5086         /* Look up the free list and find the best-fit entry */
5087         LIST_FOREACH(entry, &pool->free_list, next) {
5088                 if (entry->len >= num) {
5089                         /* Find best one */
5090                         if (entry->len == num) {
5091                                 valid_entry = entry;
5092                                 break;
5093                         }
5094                         if (valid_entry == NULL || valid_entry->len > entry->len)
5095                                 valid_entry = entry;
5096                 }
5097         }
5098
5099         /* No entry found that satisfies the request, return */
5100         if (valid_entry == NULL) {
5101                 PMD_DRV_LOG(ERR, "No valid entry found");
5102                 return -ENOMEM;
5103         }
5104         /**
5105          * The entry has exactly the requested number of queues;
5106          * remove it from the free list.
5107          */
5108         if (valid_entry->len == num) {
5109                 LIST_REMOVE(valid_entry, next);
5110         } else {
5111                 /**
5112                  * The entry has more queues than requested;
5113                  * create a new entry for the alloc list and shrink
5114                  * the free-list entry's base and length accordingly.
5115                  */
5116                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5117                 if (entry == NULL) {
5118                         PMD_DRV_LOG(ERR,
5119                                 "Failed to allocate memory for resource pool");
5120                         return -ENOMEM;
5121                 }
5122                 entry->base = valid_entry->base;
5123                 entry->len = num;
5124                 valid_entry->base += num;
5125                 valid_entry->len -= num;
5126                 valid_entry = entry;
5127         }
5128
5129         /* Insert it into alloc list, not sorted */
5130         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5131
5132         pool->num_free -= valid_entry->len;
5133         pool->num_alloc += valid_entry->len;
5134
5135         return valid_entry->base + pool->base;
5136 }
5137
5138 /**
5139  * bitmap_is_subset - Check whether src2 is a subset of src1
5140  **/
5141 static inline int
5142 bitmap_is_subset(uint8_t src1, uint8_t src2)
5143 {
5144         return !((src1 ^ src2) & src2);
5145 }
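
/*
 * Editor's note: worked example of the expression above. For src1 = 0x07,
 * src2 = 0x05: (0x07 ^ 0x05) & 0x05 = 0x02 & 0x05 = 0, so src2 is a subset.
 * For src2 = 0x08: (0x07 ^ 0x08) & 0x08 = 0x0F & 0x08 = 0x08, non-zero, so
 * src2 sets a bit that src1 lacks.
 */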
5146
5147 static enum i40e_status_code
5148 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5149 {
5150         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5151
5152         /* If DCB is not supported, only default TC is supported */
5153         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5154                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
5155                 return I40E_NOT_SUPPORTED;
5156         }
5157
5158         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
5159                 PMD_DRV_LOG(ERR,
5160                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
5161                         enabled_tcmap, hw->func_caps.enabled_tcmap);
5162                 return I40E_NOT_SUPPORTED;
5163         }
5164         return I40E_SUCCESS;
5165 }
5166
5167 int
5168 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5169                                 struct i40e_vsi_vlan_pvid_info *info)
5170 {
5171         struct i40e_hw *hw;
5172         struct i40e_vsi_context ctxt;
5173         uint8_t vlan_flags = 0;
5174         int ret;
5175
5176         if (vsi == NULL || info == NULL) {
5177                 PMD_DRV_LOG(ERR, "invalid parameters");
5178                 return I40E_ERR_PARAM;
5179         }
5180
5181         if (info->on) {
5182                 vsi->info.pvid = info->config.pvid;
5183                 /**
5184                  * If PVID insertion is enabled, only tagged packets
5185                  * are allowed to be sent out.
5186                  */
5187                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5188                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5189         } else {
5190                 vsi->info.pvid = 0;
5191                 if (info->config.reject.tagged == 0)
5192                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5193
5194                 if (info->config.reject.untagged == 0)
5195                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5196         }
5197         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5198                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
5199         vsi->info.port_vlan_flags |= vlan_flags;
5200         vsi->info.valid_sections =
5201                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5202         memset(&ctxt, 0, sizeof(ctxt));
5203         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5204         ctxt.seid = vsi->seid;
5205
5206         hw = I40E_VSI_TO_HW(vsi);
5207         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5208         if (ret != I40E_SUCCESS)
5209                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
5210
5211         return ret;
5212 }
5213
5214 static int
5215 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5216 {
5217         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5218         int i, ret;
5219         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5220
5221         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5222         if (ret != I40E_SUCCESS)
5223                 return ret;
5224
5225         if (!vsi->seid) {
5226                 PMD_DRV_LOG(ERR, "seid not valid");
5227                 return -EINVAL;
5228         }
5229
5230         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5231         tc_bw_data.tc_valid_bits = enabled_tcmap;
5232         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5233                 tc_bw_data.tc_bw_credits[i] =
5234                         (enabled_tcmap & (1 << i)) ? 1 : 0;
5235
5236         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5237         if (ret != I40E_SUCCESS) {
5238                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5239                 return ret;
5240         }
5241
5242         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5243                                         sizeof(vsi->info.qs_handle));
5244         return I40E_SUCCESS;
5245 }
5246
5247 static enum i40e_status_code
5248 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5249                                  struct i40e_aqc_vsi_properties_data *info,
5250                                  uint8_t enabled_tcmap)
5251 {
5252         enum i40e_status_code ret;
5253         int i, total_tc = 0;
5254         uint16_t qpnum_per_tc, bsf, qp_idx;
5255
5256         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5257         if (ret != I40E_SUCCESS)
5258                 return ret;
5259
5260         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5261                 if (enabled_tcmap & (1 << i))
5262                         total_tc++;
5263         if (total_tc == 0)
5264                 total_tc = 1;
5265         vsi->enabled_tc = enabled_tcmap;
5266
5267         /* Number of queues per enabled TC */
5268         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5269         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
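             /* i40e_align_floor rounded qpnum_per_tc down to a power of two,
              * so bsf is log2 of the per-TC queue count, the form expected by
              * the TC-to-queue mapping below.
              */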
5270         bsf = rte_bsf32(qpnum_per_tc);
5271
5272         /* Adjust the queue number to actual queues that can be applied */
5273         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5274                 vsi->nb_qps = qpnum_per_tc * total_tc;
5275
5276         /**
5277          * Configure TC and queue mapping parameters. For each enabled
5278          * TC, allocate qpnum_per_tc queues to it; disabled TCs are
5279          * served by the default queue.
5280          */
5281         qp_idx = 0;
5282         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5283                 if (vsi->enabled_tc & (1 << i)) {
5284                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5285                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5286                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5287                         qp_idx += qpnum_per_tc;
5288                 } else
5289                         info->tc_mapping[i] = 0;
5290         }
5291
5292         /* Associate queue number with VSI */
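             /* An SRIOV VSI lists every absolute queue ID explicitly
              * (non-contiguous mapping); other VSI types only report the
              * first queue of a contiguous range.
              */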
5293         if (vsi->type == I40E_VSI_SRIOV) {
5294                 info->mapping_flags |=
5295                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5296                 for (i = 0; i < vsi->nb_qps; i++)
5297                         info->queue_mapping[i] =
5298                                 rte_cpu_to_le_16(vsi->base_queue + i);
5299         } else {
5300                 info->mapping_flags |=
5301                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5302                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5303         }
5304         info->valid_sections |=
5305                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5306
5307         return I40E_SUCCESS;
5308 }
5309
5310 static int
5311 i40e_veb_release(struct i40e_veb *veb)
5312 {
5313         struct i40e_vsi *vsi;
5314         struct i40e_hw *hw;
5315
5316         if (veb == NULL)
5317                 return -EINVAL;
5318
5319         if (!TAILQ_EMPTY(&veb->head)) {
5320                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5321                 return -EACCES;
5322         }
5323         /* associate_vsi field is NULL for floating VEB */
5324         if (veb->associate_vsi != NULL) {
5325                 vsi = veb->associate_vsi;
5326                 hw = I40E_VSI_TO_HW(vsi);
5327
5328                 vsi->uplink_seid = veb->uplink_seid;
5329                 vsi->veb = NULL;
5330         } else {
5331                 veb->associate_pf->main_vsi->floating_veb = NULL;
5332                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5333         }
5334
5335         i40e_aq_delete_element(hw, veb->seid, NULL);
5336         rte_free(veb);
5337         return I40E_SUCCESS;
5338 }
5339
5340 /* Setup a veb */
5341 static struct i40e_veb *
5342 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5343 {
5344         struct i40e_veb *veb;
5345         int ret;
5346         struct i40e_hw *hw;
5347
5348         if (pf == NULL) {
5349                 PMD_DRV_LOG(ERR,
5350                             "veb setup failed, associated PF shouldn't be NULL");
5351                 return NULL;
5352         }
5353         hw = I40E_PF_TO_HW(pf);
5354
5355         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5356         if (!veb) {
5357                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5358                 goto fail;
5359         }
5360
5361         veb->associate_vsi = vsi;
5362         veb->associate_pf = pf;
5363         TAILQ_INIT(&veb->head);
5364         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5365
5366         /* Create a floating VEB when vsi is NULL, a regular one otherwise */
5367         if (vsi != NULL) {
5368                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5369                                       I40E_DEFAULT_TCMAP, false,
5370                                       &veb->seid, false, NULL);
5371         } else {
5372                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5373                                       true, &veb->seid, false, NULL);
5374         }
5375
5376         if (ret != I40E_SUCCESS) {
5377                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5378                             hw->aq.asq_last_status);
5379                 goto fail;
5380         }
5381         veb->enabled_tc = I40E_DEFAULT_TCMAP;
5382
5383         /* get statistics index */
5384         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5385                                 &veb->stats_idx, NULL, NULL, NULL);
5386         if (ret != I40E_SUCCESS) {
5387                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5388                             hw->aq.asq_last_status);
5389                 goto fail;
5390         }
5391         /* Get VEB bandwidth, to be implemented */
5392         /* The associated VSI now binds to this VEB; set its uplink to the VEB */
5393         if (vsi)
5394                 vsi->uplink_seid = veb->seid;
5395
5396         return veb;
5397 fail:
5398         rte_free(veb);
5399         return NULL;
5400 }
5401
5402 int
5403 i40e_vsi_release(struct i40e_vsi *vsi)
5404 {
5405         struct i40e_pf *pf;
5406         struct i40e_hw *hw;
5407         struct i40e_vsi_list *vsi_list;
5408         void *temp;
5409         int ret;
5410         struct i40e_mac_filter *f;
5411         uint16_t user_param;
5412
5413         if (!vsi)
5414                 return I40E_SUCCESS;
5415
5416         if (!vsi->adapter)
5417                 return -EFAULT;
5418
5419         user_param = vsi->user_param;
5420
5421         pf = I40E_VSI_TO_PF(vsi);
5422         hw = I40E_VSI_TO_HW(vsi);
5423
5424         /* VSI has children attached, release the children first */
5425         if (vsi->veb) {
5426                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5427                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5428                                 return -1;
5429                 }
5430                 i40e_veb_release(vsi->veb);
5431         }
5432
5433         if (vsi->floating_veb) {
5434                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5435                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5436                                 return -1;
5437                 }
5438         }
5439
5440         /* Remove all macvlan filters of the VSI */
5441         i40e_vsi_remove_all_macvlan_filter(vsi);
5442         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5443                 rte_free(f);
5444
5445         if (vsi->type != I40E_VSI_MAIN &&
5446             ((vsi->type != I40E_VSI_SRIOV) ||
5447             !pf->floating_veb_list[user_param])) {
5448                 /* Remove vsi from parent's sibling list */
5449                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5450                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its VEB is NULL");
5451                         return I40E_ERR_PARAM;
5452                 }
5453                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5454                                 &vsi->sib_vsi_list, list);
5455
5456                 /* Remove the switch element of the VSI */
5457                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5458                 if (ret != I40E_SUCCESS)
5459                         PMD_DRV_LOG(ERR, "Failed to delete element");
5460         }
5461
5462         if ((vsi->type == I40E_VSI_SRIOV) &&
5463             pf->floating_veb_list[user_param]) {
5464                 /* Remove vsi from parent's sibling list */
5465                 if (vsi->parent_vsi == NULL ||
5466                     vsi->parent_vsi->floating_veb == NULL) {
5467                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its floating VEB is NULL");
5468                         return I40E_ERR_PARAM;
5469                 }
5470                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5471                              &vsi->sib_vsi_list, list);
5472
5473                 /* Remove the switch element of the VSI */
5474                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5475                 if (ret != I40E_SUCCESS)
5476                         PMD_DRV_LOG(ERR, "Failed to delete element");
5477         }
5478
5479         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5480
5481         if (vsi->type != I40E_VSI_SRIOV)
5482                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5483         rte_free(vsi);
5484
5485         return I40E_SUCCESS;
5486 }
5487
5488 static int
5489 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5490 {
5491         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5492         struct i40e_aqc_remove_macvlan_element_data def_filter;
5493         struct i40e_mac_filter_info filter;
5494         int ret;
5495
5496         if (vsi->type != I40E_VSI_MAIN)
5497                 return I40E_ERR_CONFIG;
5498         memset(&def_filter, 0, sizeof(def_filter));
5499         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5500                                         ETH_ADDR_LEN);
5501         def_filter.vlan_tag = 0;
5502         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5503                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5504         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5505         if (ret != I40E_SUCCESS) {
5506                 struct i40e_mac_filter *f;
5507                 struct rte_ether_addr *mac;
5508
5509                 PMD_DRV_LOG(DEBUG,
5510                             "Cannot remove the default macvlan filter");
5511                 /* Need to add the permanent MAC to the MAC list */
5512                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5513                 if (f == NULL) {
5514                         PMD_DRV_LOG(ERR, "failed to allocate memory");
5515                         return I40E_ERR_NO_MEMORY;
5516                 }
5517                 mac = &f->mac_info.mac_addr;
5518                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5519                                 ETH_ADDR_LEN);
5520                 f->mac_info.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5521                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5522                 vsi->mac_num++;
5523
5524                 return ret;
5525         }
5526         rte_memcpy(&filter.mac_addr,
5527                 (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5528         filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5529         return i40e_vsi_add_mac(vsi, &filter);
5530 }
5531
5532 /*
5533  * i40e_vsi_get_bw_config - Query VSI BW Information
5534  * @vsi: the VSI to be queried
5535  *
5536  * Returns 0 on success, negative value on failure
5537  */
5538 static enum i40e_status_code
5539 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5540 {
5541         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5542         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5543         struct i40e_hw *hw = &vsi->adapter->hw;
5544         i40e_status ret;
5545         int i;
5546         uint32_t bw_max;
5547
5548         memset(&bw_config, 0, sizeof(bw_config));
5549         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5550         if (ret != I40E_SUCCESS) {
5551                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5552                             hw->aq.asq_last_status);
5553                 return ret;
5554         }
5555
5556         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5557         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5558                                         &ets_sla_config, NULL);
5559         if (ret != I40E_SUCCESS) {
5560                 PMD_DRV_LOG(ERR,
5561                         "VSI failed to get TC bandwidth configuration %u",
5562                         hw->aq.asq_last_status);
5563                 return ret;
5564         }
5565
5566         /* store and print out BW info */
5567         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5568         vsi->bw_info.bw_max = bw_config.max_bw;
5569         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5570         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
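             /* tc_bw_max is reported as two little-endian 16-bit words;
              * combine them into one 32-bit value holding 4 bits per TC.
              */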
5571         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5572                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5573                      I40E_16_BIT_WIDTH);
5574         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5575                 vsi->bw_info.bw_ets_share_credits[i] =
5576                                 ets_sla_config.share_credits[i];
5577                 vsi->bw_info.bw_ets_credits[i] =
5578                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5579                 /* 4 bits per TC, 4th bit is reserved */
5580                 vsi->bw_info.bw_ets_max[i] =
5581                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5582                                   RTE_LEN2MASK(3, uint8_t));
5583                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5584                             vsi->bw_info.bw_ets_share_credits[i]);
5585                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5586                             vsi->bw_info.bw_ets_credits[i]);
5587                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5588                             vsi->bw_info.bw_ets_max[i]);
5589         }
5590
5591         return I40E_SUCCESS;
5592 }
5593
5594 /* i40e_enable_pf_lb
5595  * @pf: pointer to the pf structure
5596  *
5597  * allow loopback on pf
5598  */
5599 static inline void
5600 i40e_enable_pf_lb(struct i40e_pf *pf)
5601 {
5602         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5603         struct i40e_vsi_context ctxt;
5604         int ret;
5605
5606         /* Use the FW API if FW >= v5.0 */
5607         if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5608                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5609                 return;
5610         }
5611
5612         memset(&ctxt, 0, sizeof(ctxt));
5613         ctxt.seid = pf->main_vsi_seid;
5614         ctxt.pf_num = hw->pf_id;
5615         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5616         if (ret) {
5617                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5618                             ret, hw->aq.asq_last_status);
5619                 return;
5620         }
5621         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5622         ctxt.info.valid_sections =
5623                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5624         ctxt.info.switch_id |=
5625                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5626
5627         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5628         if (ret)
5629                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5630                             hw->aq.asq_last_status);
5631 }
5632
5633 /* Setup a VSI */
5634 struct i40e_vsi *
5635 i40e_vsi_setup(struct i40e_pf *pf,
5636                enum i40e_vsi_type type,
5637                struct i40e_vsi *uplink_vsi,
5638                uint16_t user_param)
5639 {
5640         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5641         struct i40e_vsi *vsi;
5642         struct i40e_mac_filter_info filter;
5643         int ret;
5644         struct i40e_vsi_context ctxt;
5645         struct rte_ether_addr broadcast =
5646                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5647
5648         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5649             uplink_vsi == NULL) {
5650                 PMD_DRV_LOG(ERR,
5651                         "VSI setup failed, VSI link shouldn't be NULL");
5652                 return NULL;
5653         }
5654
5655         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5656                 PMD_DRV_LOG(ERR,
5657                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5658                 return NULL;
5659         }
5660
5661         /* Two situations:
5662          * 1. type is not MAIN and uplink vsi is not NULL:
5663          *    if the uplink VSI has no VEB yet, create one under its veb field.
5664          * 2. type is SRIOV and the uplink is NULL:
5665          *    if no floating VEB exists, create one under the floating_veb field.
5666          */
5667
5668         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5669             uplink_vsi->veb == NULL) {
5670                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5671
5672                 if (uplink_vsi->veb == NULL) {
5673                         PMD_DRV_LOG(ERR, "VEB setup failed");
5674                         return NULL;
5675                 }
5676                 /* Set ALLOW_LOOPBACK on the PF when the VEB is created */
5677                 i40e_enable_pf_lb(pf);
5678         }
5679
5680         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5681             pf->main_vsi->floating_veb == NULL) {
5682                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5683
5684                 if (pf->main_vsi->floating_veb == NULL) {
5685                         PMD_DRV_LOG(ERR, "VEB setup failed");
5686                         return NULL;
5687                 }
5688         }
5689
5690         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5691         if (!vsi) {
5692                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5693                 return NULL;
5694         }
5695         TAILQ_INIT(&vsi->mac_list);
5696         vsi->type = type;
5697         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5698         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5699         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5700         vsi->user_param = user_param;
5701         vsi->vlan_anti_spoof_on = 0;
5702         vsi->vlan_filter_on = 0;
5703         /* Allocate queues */
5704         switch (vsi->type) {
5705         case I40E_VSI_MAIN  :
5706                 vsi->nb_qps = pf->lan_nb_qps;
5707                 break;
5708         case I40E_VSI_SRIOV :
5709                 vsi->nb_qps = pf->vf_nb_qps;
5710                 break;
5711         case I40E_VSI_VMDQ2:
5712                 vsi->nb_qps = pf->vmdq_nb_qps;
5713                 break;
5714         case I40E_VSI_FDIR:
5715                 vsi->nb_qps = pf->fdir_nb_qps;
5716                 break;
5717         default:
5718                 goto fail_mem;
5719         }
5720         /*
5721          * The filter status descriptor is reported in RX queue 0,
5722          * while the TX queue for FDIR filter programming has no such
5723          * constraint and may be any queue.
5724          * To simplify things, the FDIR VSI uses queue pair 0.
5725          * To guarantee it gets queue pair 0, queue allocation must be
5726          * done before this function is called.
5727          */
5728         if (type != I40E_VSI_FDIR) {
5729                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5730                 if (ret < 0) {
5731                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5732                                     vsi->seid, ret);
5733                         goto fail_mem;
5734                 }
5735                 vsi->base_queue = ret;
5736         } else
5737                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5738
5739         /* A VF has its MSIX interrupts in the VF range; don't allocate here */
5740         if (type == I40E_VSI_MAIN) {
5741                 if (pf->support_multi_driver) {
5742                         /* With multi-driver support, INT0 must be used
5743                          * instead of allocating from the MSIX pool, which
5744                          * starts at INT1; it is thus safe to set msix_intr
5745                          * to 0 and nb_msix to 1 without i40e_res_pool_alloc.
5746                          */
5747                         vsi->msix_intr = 0;
5748                         vsi->nb_msix = 1;
5749                 } else {
5750                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5751                                                   RTE_MIN(vsi->nb_qps,
5752                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5753                         if (ret < 0) {
5754                                 PMD_DRV_LOG(ERR,
5755                                             "VSI MAIN %d get heap failed %d",
5756                                             vsi->seid, ret);
5757                                 goto fail_queue_alloc;
5758                         }
5759                         vsi->msix_intr = ret;
5760                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5761                                                RTE_MAX_RXTX_INTR_VEC_ID);
5762                 }
5763         } else if (type != I40E_VSI_SRIOV) {
5764                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5765                 if (ret < 0) {
5766                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5767                         if (type != I40E_VSI_FDIR)
5768                                 goto fail_queue_alloc;
5769                         vsi->msix_intr = 0;
5770                         vsi->nb_msix = 0;
5771                 } else {
5772                         vsi->msix_intr = ret;
5773                         vsi->nb_msix = 1;
5774                 }
5775         } else {
5776                 vsi->msix_intr = 0;
5777                 vsi->nb_msix = 0;
5778         }
5779
5780         /* Add VSI */
5781         if (type == I40E_VSI_MAIN) {
5782                 /* For the main VSI, no need to add it since it's the default one */
5783                 vsi->uplink_seid = pf->mac_seid;
5784                 vsi->seid = pf->main_vsi_seid;
5785                 /* Bind queues with specific MSIX interrupt */
5786                 /**
5787                  * At least two interrupts are needed: one for misc causes,
5788                  * enabled from the OS side, and another for binding queues
5789                  * to interrupts from the device side only.
5790                  */
5791
5792                 /* Get default VSI parameters from hardware */
5793                 memset(&ctxt, 0, sizeof(ctxt));
5794                 ctxt.seid = vsi->seid;
5795                 ctxt.pf_num = hw->pf_id;
5796                 ctxt.uplink_seid = vsi->uplink_seid;
5797                 ctxt.vf_num = 0;
5798                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5799                 if (ret != I40E_SUCCESS) {
5800                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5801                         goto fail_msix_alloc;
5802                 }
5803                 rte_memcpy(&vsi->info, &ctxt.info,
5804                         sizeof(struct i40e_aqc_vsi_properties_data));
5805                 vsi->vsi_id = ctxt.vsi_number;
5806                 vsi->info.valid_sections = 0;
5807
5808                 /* Configure TC; only TC0 is enabled */
5809                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5810                         I40E_SUCCESS) {
5811                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5812                         goto fail_msix_alloc;
5813                 }
5814
5815                 /* TC, queue mapping */
5816                 memset(&ctxt, 0, sizeof(ctxt));
5817                 vsi->info.valid_sections |=
5818                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5819                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5820                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5821                 rte_memcpy(&ctxt.info, &vsi->info,
5822                         sizeof(struct i40e_aqc_vsi_properties_data));
5823                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5824                                                 I40E_DEFAULT_TCMAP);
5825                 if (ret != I40E_SUCCESS) {
5826                         PMD_DRV_LOG(ERR,
5827                                 "Failed to configure TC queue mapping");
5828                         goto fail_msix_alloc;
5829                 }
5830                 ctxt.seid = vsi->seid;
5831                 ctxt.pf_num = hw->pf_id;
5832                 ctxt.uplink_seid = vsi->uplink_seid;
5833                 ctxt.vf_num = 0;
5834
5835                 /* Update VSI parameters */
5836                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5837                 if (ret != I40E_SUCCESS) {
5838                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5839                         goto fail_msix_alloc;
5840                 }
5841
5842                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5843                                                 sizeof(vsi->info.tc_mapping));
5844                 rte_memcpy(&vsi->info.queue_mapping,
5845                                 &ctxt.info.queue_mapping,
5846                         sizeof(vsi->info.queue_mapping));
5847                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5848                 vsi->info.valid_sections = 0;
5849
5850                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5851                                 ETH_ADDR_LEN);
5852
5853                 /**
5854                  * Updating the default filter settings is necessary to
5855                  * prevent reception of tagged packets.
5856                  * Some old firmware configurations load a default macvlan
5857                  * filter which accepts both tagged and untagged packets.
5858                  * The update replaces it with a normal filter if needed.
5859                  * For NVM 4.2.2 or later, the update is no longer needed;
5860                  * firmware with a correct configuration loads the expected
5861                  * default macvlan filter, which cannot be removed.
5862                  */
5863                 i40e_update_default_filter_setting(vsi);
5864                 i40e_config_qinq(hw, vsi);
5865         } else if (type == I40E_VSI_SRIOV) {
5866                 memset(&ctxt, 0, sizeof(ctxt));
5867                 /**
5868                  * For other VSIs, the uplink_seid equals the uplink VSI's
5869                  * uplink_seid since they share the same VEB.
5870                  */
5871                 if (uplink_vsi == NULL)
5872                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5873                 else
5874                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5875                 ctxt.pf_num = hw->pf_id;
5876                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5877                 ctxt.uplink_seid = vsi->uplink_seid;
5878                 ctxt.connection_type = 0x1;
5879                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5880
5881                 /* Use the VEB configuration if FW >= v5.0 */
5882                 if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
5883                         /* Configure switch ID */
5884                         ctxt.info.valid_sections |=
5885                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5886                         ctxt.info.switch_id =
5887                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5888                 }
5889
5890                 /* Configure port/vlan */
5891                 ctxt.info.valid_sections |=
5892                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5893                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5894                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5895                                                 hw->func_caps.enabled_tcmap);
5896                 if (ret != I40E_SUCCESS) {
5897                         PMD_DRV_LOG(ERR,
5898                                 "Failed to configure TC queue mapping");
5899                         goto fail_msix_alloc;
5900                 }
5901
5902                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5903                 ctxt.info.valid_sections |=
5904                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5905                 /**
5906                  * Since the VSI is not created yet, only configure the
5907                  * parameters here; the VSI is added below.
5908                  */
5909
5910                 i40e_config_qinq(hw, vsi);
5911         } else if (type == I40E_VSI_VMDQ2) {
5912                 memset(&ctxt, 0, sizeof(ctxt));
5913                 /*
5914                  * For other VSIs, the uplink_seid equals the uplink VSI's
5915                  * uplink_seid since they share the same VEB.
5916                  */
5917                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5918                 ctxt.pf_num = hw->pf_id;
5919                 ctxt.vf_num = 0;
5920                 ctxt.uplink_seid = vsi->uplink_seid;
5921                 ctxt.connection_type = 0x1;
5922                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5923
5924                 ctxt.info.valid_sections |=
5925                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5926                 /* user_param carries the flag to enable loopback */
5927                 if (user_param) {
5928                         ctxt.info.switch_id =
5929                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5930                         ctxt.info.switch_id |=
5931                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5932                 }
5933
5934                 /* Configure port/vlan */
5935                 ctxt.info.valid_sections |=
5936                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5937                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5938                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5939                                                 I40E_DEFAULT_TCMAP);
5940                 if (ret != I40E_SUCCESS) {
5941                         PMD_DRV_LOG(ERR,
5942                                 "Failed to configure TC queue mapping");
5943                         goto fail_msix_alloc;
5944                 }
5945                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5946                 ctxt.info.valid_sections |=
5947                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5948         } else if (type == I40E_VSI_FDIR) {
5949                 memset(&ctxt, 0, sizeof(ctxt));
5950                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5951                 ctxt.pf_num = hw->pf_id;
5952                 ctxt.vf_num = 0;
5953                 ctxt.uplink_seid = vsi->uplink_seid;
5954                 ctxt.connection_type = 0x1;     /* regular data port */
5955                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5956                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5957                                                 I40E_DEFAULT_TCMAP);
5958                 if (ret != I40E_SUCCESS) {
5959                         PMD_DRV_LOG(ERR,
5960                                 "Failed to configure TC queue mapping.");
5961                         goto fail_msix_alloc;
5962                 }
5963                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5964                 ctxt.info.valid_sections |=
5965                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5966         } else {
5967                 PMD_DRV_LOG(ERR, "VSI: type not supported yet");
5968                 goto fail_msix_alloc;
5969         }
5970
5971         if (vsi->type != I40E_VSI_MAIN) {
5972                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5973                 if (ret != I40E_SUCCESS) {
5974                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5975                                     hw->aq.asq_last_status);
5976                         goto fail_msix_alloc;
5977                 }
5978                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5979                 vsi->info.valid_sections = 0;
5980                 vsi->seid = ctxt.seid;
5981                 vsi->vsi_id = ctxt.vsi_number;
5982                 vsi->sib_vsi_list.vsi = vsi;
5983                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5984                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5985                                           &vsi->sib_vsi_list, list);
5986                 } else {
5987                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5988                                           &vsi->sib_vsi_list, list);
5989                 }
5990         }
5991
5992         /* MAC/VLAN configuration */
5993         rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
5994         filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5995
5996         ret = i40e_vsi_add_mac(vsi, &filter);
5997         if (ret != I40E_SUCCESS) {
5998                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5999                 goto fail_msix_alloc;
6000         }
6001
6002         /* Get VSI BW information */
6003         i40e_vsi_get_bw_config(vsi);
6004         return vsi;
6005 fail_msix_alloc:
6006         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
6007 fail_queue_alloc:
6008         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
6009 fail_mem:
6010         rte_free(vsi);
6011         return NULL;
6012 }
6013
6014 /* Configure vlan filter on or off */
6015 int
6016 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
6017 {
6018         int i, num;
6019         struct i40e_mac_filter *f;
6020         void *temp;
6021         struct i40e_mac_filter_info *mac_filter;
6022         enum i40e_mac_filter_type desired_filter;
6023         int ret = I40E_SUCCESS;
6024
6025         if (on) {
6026                 /* Filter to match MAC and VLAN */
6027                 desired_filter = I40E_MACVLAN_PERFECT_MATCH;
6028         } else {
6029                 /* Filter to match only MAC */
6030                 desired_filter = I40E_MAC_PERFECT_MATCH;
6031         }
6032
6033         num = vsi->mac_num;
6034
6035         mac_filter = rte_zmalloc("mac_filter_info_data",
6036                                  num * sizeof(*mac_filter), 0);
6037         if (mac_filter == NULL) {
6038                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6039                 return I40E_ERR_NO_MEMORY;
6040         }
6041
6042         i = 0;
6043
6044         /* Remove all existing MAC filters */
6045         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
6046                 mac_filter[i] = f->mac_info;
6047                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
6048                 if (ret) {
6049                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6050                                     on ? "enable" : "disable");
6051                         goto DONE;
6052                 }
6053                 i++;
6054         }
6055
6056         /* Re-add the filters with the new filter type */
6057         for (i = 0; i < num; i++) {
6058                 mac_filter[i].filter_type = desired_filter;
6059                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
6060                 if (ret) {
6061                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6062                                     on ? "enable" : "disable");
6063                         goto DONE;
6064                 }
6065         }
6066
6067 DONE:
6068         rte_free(mac_filter);
6069         return ret;
6070 }
6071
6072 /* Configure vlan stripping on or off */
6073 int
6074 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6075 {
6076         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6077         struct i40e_vsi_context ctxt;
6078         uint8_t vlan_flags;
6079         int ret = I40E_SUCCESS;
6080
6081         /* Check if stripping is already on or off */
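             /* I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH is encoded as 0 and
              * I40E_AQ_VSI_PVLAN_EMOD_NOTHING as the full EMOD_MASK,
              * hence the comparisons below.
              */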
6082         if (vsi->info.valid_sections &
6083                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6084                 if (on) {
6085                         if ((vsi->info.port_vlan_flags &
6086                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6087                                 return 0; /* already on */
6088                 } else {
6089                         if ((vsi->info.port_vlan_flags &
6090                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6091                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
6092                                 return 0; /* already off */
6093                 }
6094         }
6095
6096         if (on)
6097                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6098         else
6099                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6100         vsi->info.valid_sections =
6101                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6102         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6103         vsi->info.port_vlan_flags |= vlan_flags;
6104         ctxt.seid = vsi->seid;
6105         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6106         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6107         if (ret)
6108                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6109                             on ? "enable" : "disable");
6110
6111         return ret;
6112 }
6113
6114 static int
6115 i40e_dev_init_vlan(struct rte_eth_dev *dev)
6116 {
6117         struct rte_eth_dev_data *data = dev->data;
6118         int ret;
6119         int mask = 0;
6120
6121         /* Apply vlan offload setting */
6122         mask = ETH_VLAN_STRIP_MASK |
6123                ETH_QINQ_STRIP_MASK |
6124                ETH_VLAN_FILTER_MASK |
6125                ETH_VLAN_EXTEND_MASK;
6126         ret = i40e_vlan_offload_set(dev, mask);
6127         if (ret) {
6128                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
6129                 return ret;
6130         }
6131
6132         /* Apply pvid setting */
6133         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6134                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
6135         if (ret)
6136                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
6137
6138         return ret;
6139 }
6140
6141 static int
6142 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6143 {
6144         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6145
6146         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
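             /* The arguments after the seid are save_bad_pac, pad_short_pac
              * and double_vlan (see i40e_aq_set_port_parameters in the base
              * code); only the double-VLAN (QinQ) flag is driven by 'on'.
              */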
6147 }
6148
6149 static int
6150 i40e_update_flow_control(struct i40e_hw *hw)
6151 {
6152 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
6153         struct i40e_link_status link_status;
6154         uint32_t rxfc = 0, txfc = 0, reg;
6155         uint8_t an_info;
6156         int ret;
6157
6158         memset(&link_status, 0, sizeof(link_status));
6159         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
6160         if (ret != I40E_SUCCESS) {
6161                 PMD_DRV_LOG(ERR, "Failed to get link status information");
6162                 goto write_reg; /* Disable flow control */
6163         }
6164
6165         an_info = hw->phy.link_info.an_info;
6166         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
6167                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
6168                 ret = I40E_ERR_NOT_READY;
6169                 goto write_reg; /* Disable flow control */
6170         }
6171         /**
6172          * If link auto negotiation is enabled, flow control needs to
6173          * be configured according to the negotiated result.
6174          */
6175         switch (an_info & I40E_LINK_PAUSE_RXTX) {
6176         case I40E_LINK_PAUSE_RXTX:
6177                 rxfc = 1;
6178                 txfc = 1;
6179                 hw->fc.current_mode = I40E_FC_FULL;
6180                 break;
6181         case I40E_AQ_LINK_PAUSE_RX:
6182                 rxfc = 1;
6183                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
6184                 break;
6185         case I40E_AQ_LINK_PAUSE_TX:
6186                 txfc = 1;
6187                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
6188                 break;
6189         default:
6190                 hw->fc.current_mode = I40E_FC_NONE;
6191                 break;
6192         }
6193
6194 write_reg:
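             /* rxfc and txfc stay 0 on the error paths above, so jumping
              * here after a failed link query disables flow control in
              * both directions.
              */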
6195         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6196                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6197         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6198         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6199         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6200         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6201
6202         return ret;
6203 }
6204
6205 /* PF setup */
6206 static int
6207 i40e_pf_setup(struct i40e_pf *pf)
6208 {
6209         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6210         struct i40e_filter_control_settings settings;
6211         struct i40e_vsi *vsi;
6212         int ret;
6213
6214         /* Clear all stats counters */
6215         pf->offset_loaded = FALSE;
6216         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6217         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
6218         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
6219         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6220
6221         ret = i40e_pf_get_switch_config(pf);
6222         if (ret != I40E_SUCCESS) {
6223                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6224                 return ret;
6225         }
6226
6227         ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6228         if (ret)
6229                 PMD_INIT_LOG(WARNING,
6230                         "failed to allocate switch domain for device %d", ret);
6231
6232         if (pf->flags & I40E_FLAG_FDIR) {
6233                 /* Allocate queues first so that FDIR uses queue pair 0 */
6234                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6235                 if (ret != I40E_FDIR_QUEUE_ID) {
6236                         PMD_DRV_LOG(ERR,
6237                                 "queue allocation fails for FDIR: ret=%d",
6238                                 ret);
6239                         pf->flags &= ~I40E_FLAG_FDIR;
6240                 }
6241         }
6242         /*  main VSI setup */
6243         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6244         if (!vsi) {
6245                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6246                 return I40E_ERR_NOT_READY;
6247         }
6248         pf->main_vsi = vsi;
6249
6250         /* Configure filter control */
6251         memset(&settings, 0, sizeof(settings));
6252         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
6253                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6254         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
6255                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6256         else {
6257                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6258                         hw->func_caps.rss_table_size);
6259                 return I40E_ERR_PARAM;
6260         }
6261         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
6262                 hw->func_caps.rss_table_size);
6263         pf->hash_lut_size = hw->func_caps.rss_table_size;
6264
6265         /* Enable ethtype and macvlan filters */
6266         settings.enable_ethtype = TRUE;
6267         settings.enable_macvlan = TRUE;
6268         ret = i40e_set_filter_control(hw, &settings);
6269         if (ret)
6270                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6271                                                                 ret);
6272
6273         /* Update flow control according to the auto negotiation */
6274         i40e_update_flow_control(hw);
6275
6276         return I40E_SUCCESS;
6277 }
6278
6279 int
6280 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6281 {
6282         uint32_t reg;
6283         uint16_t j;
6284
6285         /**
6286          * Set or clear TX Queue Disable flags,
6287          * as required by hardware.
6288          */
6289         i40e_pre_tx_queue_cfg(hw, q_idx, on);
6290         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6291
6292         /* Wait until the request is finished */
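             /* Hardware mirrors QENA_REQ into QENA_STAT once a pending
              * enable/disable request completes, so poll until both bits
              * match.
              */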
6293         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6294                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6295                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6296                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6297                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6298                                                         & 0x1))) {
6299                         break;
6300                 }
6301         }
6302         if (on) {
6303                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6304                         return I40E_SUCCESS; /* already on, skip next steps */
6305
6306                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6307                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6308         } else {
6309                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6310                         return I40E_SUCCESS; /* already off, skip next steps */
6311                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6312         }
6313         /* Write the register */
6314         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6315         /* Check the result */
6316         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6317                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6318                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6319                 if (on) {
6320                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6321                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
6322                                 break;
6323                 } else {
6324                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6325                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6326                                 break;
6327                 }
6328         }
6329         /* Check for timeout */
6330         if (j >= I40E_CHK_Q_ENA_COUNT) {
6331                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6332                             (on ? "enable" : "disable"), q_idx);
6333                 return I40E_ERR_TIMEOUT;
6334         }
6335
6336         return I40E_SUCCESS;
6337 }
6338
6339 int
6340 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6341 {
6342         uint32_t reg;
6343         uint16_t j;
6344
6345         /* Wait until the request is finished */
6346         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6347                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6348                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6349                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6350                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
6351                         break;
6352         }
6353
6354         if (on) {
6355                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6356                         return I40E_SUCCESS; /* Already on, skip next steps */
6357                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6358         } else {
6359                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6360                         return I40E_SUCCESS; /* Already off, skip next steps */
6361                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6362         }
6363
6364         /* Write the register */
6365         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6366         /* Check the result */
6367         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6368                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6369                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6370                 if (on) {
6371                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6372                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
6373                                 break;
6374                 } else {
6375                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6376                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6377                                 break;
6378                 }
6379         }
6380
6381         /* Check for timeout */
6382         if (j >= I40E_CHK_Q_ENA_COUNT) {
6383                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6384                             (on ? "enable" : "disable"), q_idx);
6385                 return I40E_ERR_TIMEOUT;
6386         }
6387
6388         return I40E_SUCCESS;
6389 }
6390
6391 /* Initialize VSI for TX */
6392 static int
6393 i40e_dev_tx_init(struct i40e_pf *pf)
6394 {
6395         struct rte_eth_dev_data *data = pf->dev_data;
6396         uint16_t i;
6397         uint32_t ret = I40E_SUCCESS;
6398         struct i40e_tx_queue *txq;
6399
6400         for (i = 0; i < data->nb_tx_queues; i++) {
6401                 txq = data->tx_queues[i];
6402                 if (!txq || !txq->q_set)
6403                         continue;
6404                 ret = i40e_tx_queue_init(txq);
6405                 if (ret != I40E_SUCCESS)
6406                         break;
6407         }
6408         if (ret == I40E_SUCCESS)
6409                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6410                                      ->eth_dev);
6411
6412         return ret;
6413 }
6414
6415 /* Initialize VSI for RX */
6416 static int
6417 i40e_dev_rx_init(struct i40e_pf *pf)
6418 {
6419         struct rte_eth_dev_data *data = pf->dev_data;
6420         int ret = I40E_SUCCESS;
6421         uint16_t i;
6422         struct i40e_rx_queue *rxq;
6423
6424         i40e_pf_config_rss(pf);
6425         for (i = 0; i < data->nb_rx_queues; i++) {
6426                 rxq = data->rx_queues[i];
6427                 if (!rxq || !rxq->q_set)
6428                         continue;
6429
6430                 ret = i40e_rx_queue_init(rxq);
6431                 if (ret != I40E_SUCCESS) {
6432                         PMD_DRV_LOG(ERR,
6433                                 "Failed to do RX queue initialization");
6434                         break;
6435                 }
6436         }
6437         if (ret == I40E_SUCCESS)
6438                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6439                                      ->eth_dev);
6440
6441         return ret;
6442 }
6443
6444 static int
6445 i40e_dev_rxtx_init(struct i40e_pf *pf)
6446 {
6447         int err;
6448
6449         err = i40e_dev_tx_init(pf);
6450         if (err) {
6451                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6452                 return err;
6453         }
6454         err = i40e_dev_rx_init(pf);
6455         if (err) {
6456                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6457                 return err;
6458         }
6459
6460         return err;
6461 }
6462
6463 static int
6464 i40e_vmdq_setup(struct rte_eth_dev *dev)
6465 {
6466         struct rte_eth_conf *conf = &dev->data->dev_conf;
6467         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6468         int i, err, conf_vsis, j, loop;
6469         struct i40e_vsi *vsi;
6470         struct i40e_vmdq_info *vmdq_info;
6471         struct rte_eth_vmdq_rx_conf *vmdq_conf;
6472         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6473
6474         /*
6475          * Disable interrupt to avoid messages from VFs. This also avoids
6476          * race conditions during VSI creation/destruction.
6477          */
6478         i40e_pf_disable_irq0(hw);
6479
6480         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6481                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6482                 return -ENOTSUP;
6483         }
6484
6485         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6486         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6487                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
6488                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6489                         pf->max_nb_vmdq_vsi);
6490                 return -ENOTSUP;
6491         }
6492
6493         if (pf->vmdq != NULL) {
6494                 PMD_INIT_LOG(INFO, "VMDQ already configured");
6495                 return 0;
6496         }
6497
6498         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6499                                 sizeof(*vmdq_info) * conf_vsis, 0);
6500
6501         if (pf->vmdq == NULL) {
6502                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6503                 return -ENOMEM;
6504         }
6505
6506         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6507
6508         /* Create VMDQ VSI */
6509         for (i = 0; i < conf_vsis; i++) {
6510                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6511                                 vmdq_conf->enable_loop_back);
6512                 if (vsi == NULL) {
6513                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6514                         err = -1;
6515                         goto err_vsi_setup;
6516                 }
6517                 vmdq_info = &pf->vmdq[i];
6518                 vmdq_info->pf = pf;
6519                 vmdq_info->vsi = vsi;
6520         }
6521         pf->nb_cfg_vmdq_vsi = conf_vsis;
6522
6523         /* Configure VLAN */
6524         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6525         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6526                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6527                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6528                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6529                                         vmdq_conf->pool_map[i].vlan_id, j);
6530
6531                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6532                                                 vmdq_conf->pool_map[i].vlan_id);
6533                                 if (err) {
6534                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6535                                         err = -1;
6536                                         goto err_vsi_setup;
6537                                 }
6538                         }
6539                 }
6540         }
6541
6542         i40e_pf_enable_irq0(hw);
6543
6544         return 0;
6545
6546 err_vsi_setup:
6547         for (i = 0; i < conf_vsis; i++)
6548                 if (pf->vmdq[i].vsi == NULL)
6549                         break;
6550                 else
6551                         i40e_vsi_release(pf->vmdq[i].vsi);
6552
6553         rte_free(pf->vmdq);
6554         pf->vmdq = NULL;
6555         i40e_pf_enable_irq0(hw);
6556         return err;
6557 }
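
/*
 * For illustration, the application-side configuration consumed by
 * i40e_vmdq_setup() could look like the sketch below (hypothetical values;
 * names follow the RTE_ETH_* convention of this ethdev revision):
 *
 *	struct rte_eth_conf conf = { 0 };
 *	struct rte_eth_vmdq_rx_conf *vmdq = &conf.rx_adv_conf.vmdq_rx_conf;
 *
 *	conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
 *	vmdq->nb_queue_pools = RTE_ETH_16_POOLS;
 *	vmdq->enable_loop_back = 0;
 *	vmdq->nb_pool_maps = 1;
 *	vmdq->pool_map[0].vlan_id = 100;	// steer VLAN 100 ...
 *	vmdq->pool_map[0].pools = 1ULL << 0;	// ... to pool 0
 *
 * Each pool becomes one VMDQ VSI, and every bit set in pool_map[].pools
 * causes the matching vlan_id to be added to that pool's VSI via
 * i40e_vsi_add_vlan().
 */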
6558
6559 static void
6560 i40e_stat_update_32(struct i40e_hw *hw,
6561                    uint32_t reg,
6562                    bool offset_loaded,
6563                    uint64_t *offset,
6564                    uint64_t *stat)
6565 {
6566         uint64_t new_data;
6567
6568         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6569         if (!offset_loaded)
6570                 *offset = new_data;
6571
6572         if (new_data >= *offset)
6573                 *stat = (uint64_t)(new_data - *offset);
6574         else
6575                 *stat = (uint64_t)((new_data +
6576                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6577 }
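
/*
 * Worked example for the 32-bit rollover handling above: if the snapshot
 * *offset is 0xFFFFFFF0 and the free-running counter has since wrapped to
 * 0x00000010, then new_data < *offset and the delta is computed as
 * (0x00000010 + 2^32) - 0xFFFFFFF0 = 0x20, i.e. 32 events.
 */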
6578
6579 static void
6580 i40e_stat_update_48(struct i40e_hw *hw,
6581                    uint32_t hireg,
6582                    uint32_t loreg,
6583                    bool offset_loaded,
6584                    uint64_t *offset,
6585                    uint64_t *stat)
6586 {
6587         uint64_t new_data;
6588
6589         if (hw->device_id == I40E_DEV_ID_QEMU) {
6590                 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6591                 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6592                                 I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6593         } else {
6594                 new_data = I40E_READ_REG64(hw, loreg);
6595         }
6596
6597         if (!offset_loaded)
6598                 *offset = new_data;
6599
6600         if (new_data >= *offset)
6601                 *stat = new_data - *offset;
6602         else
6603                 *stat = (uint64_t)((new_data +
6604                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6605
6606         *stat &= I40E_48_BIT_MASK;
6607 }
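
/*
 * The 48-bit variant works the same way, except that on most devices the
 * counter is read as a single 64-bit register (I40E_READ_REG64), while the
 * QEMU model needs two 32-bit reads stitched together. The final
 * "*stat &= I40E_48_BIT_MASK" discards any bits above bit 47 so that a wrap
 * at 2^48 yields the same delta arithmetic as the 32-bit example above.
 */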
6608
6609 /* Disable IRQ0 */
6610 void
6611 i40e_pf_disable_irq0(struct i40e_hw *hw)
6612 {
6613         /* Disable all interrupt types */
6614         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6615                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6616         I40E_WRITE_FLUSH(hw);
6617 }
6618
6619 /* Enable IRQ0 */
6620 void
6621 i40e_pf_enable_irq0(struct i40e_hw *hw)
6622 {
6623         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6624                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6625                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6626                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6627         I40E_WRITE_FLUSH(hw);
6628 }
6629
6630 static void
6631 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6632 {
6633         /* read pending request and disable first */
6634         i40e_pf_disable_irq0(hw);
6635         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6636         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6637                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6638
6639         if (no_queue)
6640                 /* Link no queues with irq0 */
6641                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6642                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6643 }
6644
6645 static void
6646 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6647 {
6648         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6649         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6650         int i;
6651         uint16_t abs_vf_id;
6652         uint32_t index, offset, val;
6653
6654         if (!pf->vfs)
6655                 return;
6656         /**
6657          * Try to find which VF triggered a reset; use the absolute VF id,
6658          * since the register is a global one.
6659          */
6660         for (i = 0; i < pf->vf_num; i++) {
6661                 abs_vf_id = hw->func_caps.vf_base_id + i;
6662                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6663                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6664                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6665                 /* VFR event occurred */
6666                 if (val & (0x1 << offset)) {
6667                         int ret;
6668
6669                         /* Clear the event first */
6670                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6671                                                         (0x1 << offset));
6672                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6673                         /**
6674                          * Only notify that a VF reset event occurred;
6675                          * don't trigger another SW reset.
6676                          */
6677                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6678                         if (ret != I40E_SUCCESS)
6679                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6680                 }
6681         }
6682 }
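
/*
 * Example of the VFLRSTAT indexing above: for abs_vf_id 37 with 32-bit
 * registers, index = 37 / 32 = 1 and offset = 37 % 32 = 5, so the event bit
 * for that VF is bit 5 of I40E_GLGEN_VFLRSTAT(1).
 */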
6683
6684 static void
6685 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6686 {
6687         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6688         int i;
6689
6690         for (i = 0; i < pf->vf_num; i++)
6691                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6692 }
6693
6694 static void
6695 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6696 {
6697         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6698         struct i40e_arq_event_info info;
6699         uint16_t pending, opcode;
6700         int ret;
6701
6702         info.buf_len = I40E_AQ_BUF_SZ;
6703         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6704         if (!info.msg_buf) {
6705                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6706                 return;
6707         }
6708
6709         pending = 1;
6710         while (pending) {
6711                 ret = i40e_clean_arq_element(hw, &info, &pending);
6712
6713                 if (ret != I40E_SUCCESS) {
6714                         PMD_DRV_LOG(INFO,
6715                                 "Failed to read msg from AdminQ, aq_err: %u",
6716                                 hw->aq.asq_last_status);
6717                         break;
6718                 }
6719                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6720
6721                 switch (opcode) {
6722                 case i40e_aqc_opc_send_msg_to_pf:
6723                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6724                         i40e_pf_host_handle_vf_msg(dev,
6725                                         rte_le_to_cpu_16(info.desc.retval),
6726                                         rte_le_to_cpu_32(info.desc.cookie_high),
6727                                         rte_le_to_cpu_32(info.desc.cookie_low),
6728                                         info.msg_buf,
6729                                         info.msg_len);
6730                         break;
6731                 case i40e_aqc_opc_get_link_status:
6732                         ret = i40e_dev_link_update(dev, 0);
6733                         if (!ret)
6734                                 rte_eth_dev_callback_process(dev,
6735                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6736                         break;
6737                 default:
6738                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6739                                     opcode);
6740                         break;
6741                 }
6742         }
6743         rte_free(info.msg_buf);
6744 }
6745
6746 static void
6747 i40e_handle_mdd_event(struct rte_eth_dev *dev)
6748 {
6749 #define I40E_MDD_CLEAR32 0xFFFFFFFF
6750 #define I40E_MDD_CLEAR16 0xFFFF
6751         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6752         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6753         bool mdd_detected = false;
6754         struct i40e_pf_vf *vf;
6755         uint32_t reg;
6756         int i;
6757
6758         /* find what triggered the MDD event */
6759         reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
6760         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6761                 uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6762                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6763                 uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6764                                 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6765                 uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6766                                 I40E_GL_MDET_TX_EVENT_SHIFT;
6767                 uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6768                                 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6769                                         hw->func_caps.base_queue;
6770                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
6771                         "queue %d PF number 0x%02x VF number 0x%02x device %s\n",
6772                                 event, queue, pf_num, vf_num, dev->data->name);
6773                 I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
6774                 mdd_detected = true;
6775         }
6776         reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
6777         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6778                 uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6779                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6780                 uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6781                                 I40E_GL_MDET_RX_EVENT_SHIFT;
6782                 uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6783                                 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6784                                         hw->func_caps.base_queue;
6785
6786                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
6787                                 "queue %d of function 0x%02x device %s\n",
6788                                         event, queue, func, dev->data->name);
6789                 I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
6790                 mdd_detected = true;
6791         }
6792
6793         if (mdd_detected) {
6794                 reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
6795                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6796                         I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
6797                         PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n");
6798                 }
6799                 reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
6800                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6801                         I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
6802                                         I40E_MDD_CLEAR16);
6803                         PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n");
6804                 }
6805         }
6806
6807         /* see if one of the VFs needs its hand slapped */
6808         for (i = 0; i < pf->vf_num && mdd_detected; i++) {
6809                 vf = &pf->vfs[i];
6810                 reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
6811                 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6812                         I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
6813                                         I40E_MDD_CLEAR16);
6814                         vf->num_mdd_events++;
6815                         PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %-"
6816                                         PRIu64 "times\n",
6817                                         i, vf->num_mdd_events);
6818                 }
6819
6820                 reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
6821                 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6822                         I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
6823                                         I40E_MDD_CLEAR16);
6824                         vf->num_mdd_events++;
6825                         PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %-"
6826                                         PRIu64 "times\n",
6827                                         i, vf->num_mdd_events);
6828                 }
6829         }
6830 }
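
/*
 * The decoding in i40e_handle_mdd_event() is the usual mask-then-shift
 * pattern, e.g. (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
 * I40E_GL_MDET_TX_EVENT_SHIFT. Note that hw->func_caps.base_queue is
 * subtracted because the GL_MDET_* queue field holds an absolute queue
 * number, while the logs report it relative to this function's queue base.
 */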
6831
6832 /**
6833  * Interrupt handler triggered by the NIC for handling
6834  * a specific interrupt.
6835  *
6836  * @param handle
6837  *  Pointer to interrupt handle.
6838  * @param param
6839  *  The address of the parameter (struct rte_eth_dev *) registered before.
6840  *
6841  * @return
6842  *  void
6843  */
6844 static void
6845 i40e_dev_interrupt_handler(void *param)
6846 {
6847         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6848         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6849         uint32_t icr0;
6850
6851         /* Disable interrupt */
6852         i40e_pf_disable_irq0(hw);
6853
6854         /* read out interrupt causes */
6855         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6856
6857         /* No interrupt event indicated */
6858         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6859                 PMD_DRV_LOG(INFO, "No interrupt event");
6860                 goto done;
6861         }
6862         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6863                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6864         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6865                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6866                 i40e_handle_mdd_event(dev);
6867         }
6868         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6869                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6870         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6871                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6872         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6873                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6874         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6875                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6876         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6877                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6878
6879         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6880                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6881                 i40e_dev_handle_vfr_event(dev);
6882         }
6883         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6884                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6885                 i40e_dev_handle_aq_msg(dev);
6886         }
6887
6888 done:
6889         /* Enable interrupt */
6890         i40e_pf_enable_irq0(hw);
6891 }
6892
6893 static void
6894 i40e_dev_alarm_handler(void *param)
6895 {
6896         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6897         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6898         uint32_t icr0;
6899
6900         /* Disable interrupt */
6901         i40e_pf_disable_irq0(hw);
6902
6903         /* read out interrupt causes */
6904         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6905
6906         /* No interrupt event indicated */
6907         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
6908                 goto done;
6909         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6910                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6911         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6912                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6913                 i40e_handle_mdd_event(dev);
6914         }
6915         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6916                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6917         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6918                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6919         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6920                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6921         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6922                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6923         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6924                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6925
6926         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6927                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6928                 i40e_dev_handle_vfr_event(dev);
6929         }
6930         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6931                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6932                 i40e_dev_handle_aq_msg(dev);
6933         }
6934
6935 done:
6936         /* Enable interrupt */
6937         i40e_pf_enable_irq0(hw);
6938         rte_eal_alarm_set(I40E_ALARM_INTERVAL,
6939                           i40e_dev_alarm_handler, dev);
6940 }
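
/*
 * Unlike i40e_dev_interrupt_handler(), the alarm handler re-arms itself,
 * turning ICR0 processing into a periodic poll for setups without a usable
 * interrupt line. A sketch of starting and stopping such a poll with the
 * EAL alarm API used above:
 *
 *	rte_eal_alarm_set(I40E_ALARM_INTERVAL, i40e_dev_alarm_handler, dev);
 *	...
 *	rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
 */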
6941
6942 int
6943 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6944                          struct i40e_macvlan_filter *filter,
6945                          int total)
6946 {
6947         int ele_num, ele_buff_size;
6948         int num, actual_num, i;
6949         uint16_t flags;
6950         int ret = I40E_SUCCESS;
6951         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6952         struct i40e_aqc_add_macvlan_element_data *req_list;
6953
6954         if (filter == NULL || total == 0)
6955                 return I40E_ERR_PARAM;
6956         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6957         ele_buff_size = hw->aq.asq_buf_size;
6958
6959         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6960         if (req_list == NULL) {
6961                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6962                 return I40E_ERR_NO_MEMORY;
6963         }
6964
6965         num = 0;
6966         do {
6967                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6968                 memset(req_list, 0, ele_buff_size);
6969
6970                 for (i = 0; i < actual_num; i++) {
6971                         rte_memcpy(req_list[i].mac_addr,
6972                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6973                         req_list[i].vlan_tag =
6974                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6975
6976                         switch (filter[num + i].filter_type) {
6977                         case I40E_MAC_PERFECT_MATCH:
6978                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6979                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6980                                 break;
6981                         case I40E_MACVLAN_PERFECT_MATCH:
6982                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6983                                 break;
6984                         case I40E_MAC_HASH_MATCH:
6985                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6986                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6987                                 break;
6988                         case I40E_MACVLAN_HASH_MATCH:
6989                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6990                                 break;
6991                         default:
6992                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
6993                                 ret = I40E_ERR_PARAM;
6994                                 goto DONE;
6995                         }
6996
6997                         req_list[i].queue_number = 0;
6998
6999                         req_list[i].flags = rte_cpu_to_le_16(flags);
7000                 }
7001
7002                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
7003                                                 actual_num, NULL);
7004                 if (ret != I40E_SUCCESS) {
7005                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
7006                         goto DONE;
7007                 }
7008                 num += actual_num;
7009         } while (num < total);
7010
7011 DONE:
7012         rte_free(req_list);
7013         return ret;
7014 }
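
/*
 * The batching above sizes each AQ command to fit the admin send queue
 * buffer: ele_num = asq_buf_size / sizeof(element). For example, with a
 * 4 KB ASQ buffer and 16-byte add-macvlan elements (illustrative sizes),
 * ele_num would be 256, so adding 600 filters would issue three commands
 * carrying 256, 256 and 88 elements.
 */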
7015
7016 int
7017 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
7018                             struct i40e_macvlan_filter *filter,
7019                             int total)
7020 {
7021         int ele_num, ele_buff_size;
7022         int num, actual_num, i;
7023         uint16_t flags;
7024         int ret = I40E_SUCCESS;
7025         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7026         struct i40e_aqc_remove_macvlan_element_data *req_list;
7027
7028         if (filter == NULL || total == 0)
7029                 return I40E_ERR_PARAM;
7030
7031         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7032         ele_buff_size = hw->aq.asq_buf_size;
7033
7034         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
7035         if (req_list == NULL) {
7036                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
7037                 return I40E_ERR_NO_MEMORY;
7038         }
7039
7040         num = 0;
7041         do {
7042                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7043                 memset(req_list, 0, ele_buff_size);
7044
7045                 for (i = 0; i < actual_num; i++) {
7046                         rte_memcpy(req_list[i].mac_addr,
7047                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
7048                         req_list[i].vlan_tag =
7049                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
7050
7051                         switch (filter[num + i].filter_type) {
7052                         case I40E_MAC_PERFECT_MATCH:
7053                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
7054                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7055                                 break;
7056                         case I40E_MACVLAN_PERFECT_MATCH:
7057                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7058                                 break;
7059                         case I40E_MAC_HASH_MATCH:
7060                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
7061                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7062                                 break;
7063                         case I40E_MACVLAN_HASH_MATCH:
7064                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
7065                                 break;
7066                         default:
7067                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
7068                                 ret = I40E_ERR_PARAM;
7069                                 goto DONE;
7070                         }
7071                         req_list[i].flags = rte_cpu_to_le_16(flags);
7072                 }
7073
7074                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
7075                                                 actual_num, NULL);
7076                 if (ret != I40E_SUCCESS) {
7077                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7078                         goto DONE;
7079                 }
7080                 num += actual_num;
7081         } while (num < total);
7082
7083 DONE:
7084         rte_free(req_list);
7085         return ret;
7086 }
7087
7088 /* Find a specific MAC filter */
7089 static struct i40e_mac_filter *
7090 i40e_find_mac_filter(struct i40e_vsi *vsi,
7091                          struct rte_ether_addr *macaddr)
7092 {
7093         struct i40e_mac_filter *f;
7094
7095         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7096                 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7097                         return f;
7098         }
7099
7100         return NULL;
7101 }
7102
7103 static bool
7104 i40e_find_vlan_filter(struct i40e_vsi *vsi,
7105                          uint16_t vlan_id)
7106 {
7107         uint32_t vid_idx, vid_bit;
7108
7109         if (vlan_id > ETH_VLAN_ID_MAX)
7110                 return 0;
7111
7112         vid_idx = I40E_VFTA_IDX(vlan_id);
7113         vid_bit = I40E_VFTA_BIT(vlan_id);
7114
7115         if (vsi->vfta[vid_idx] & vid_bit)
7116                 return 1;
7117         else
7118                 return 0;
7119 }
7120
7121 static void
7122 i40e_store_vlan_filter(struct i40e_vsi *vsi,
7123                        uint16_t vlan_id, bool on)
7124 {
7125         uint32_t vid_idx, vid_bit;
7126
7127         vid_idx = I40E_VFTA_IDX(vlan_id);
7128         vid_bit = I40E_VFTA_BIT(vlan_id);
7129
7130         if (on)
7131                 vsi->vfta[vid_idx] |= vid_bit;
7132         else
7133                 vsi->vfta[vid_idx] &= ~vid_bit;
7134 }
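
/*
 * The vfta[] lookup/store pair above treats the VLAN table as a flat bitmap
 * of 32-bit words. For vlan_id 100 that gives vid_idx = 100 / 32 = 3 and
 * vid_bit = 1 << (100 % 32) = 1 << 4, i.e. bit 4 of vsi->vfta[3] (assuming
 * the usual I40E_VFTA_IDX/I40E_VFTA_BIT word-and-bit split).
 */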
7135
7136 void
7137 i40e_set_vlan_filter(struct i40e_vsi *vsi,
7138                      uint16_t vlan_id, bool on)
7139 {
7140         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7141         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
7142         int ret;
7143
7144         if (vlan_id > ETH_VLAN_ID_MAX)
7145                 return;
7146
7147         i40e_store_vlan_filter(vsi, vlan_id, on);
7148
7149         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
7150                 return;
7151
7152         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
7153
7154         if (on) {
7155                 ret = i40e_aq_add_vlan(hw, vsi->seid,
7156                                        &vlan_data, 1, NULL);
7157                 if (ret != I40E_SUCCESS)
7158                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
7159         } else {
7160                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
7161                                           &vlan_data, 1, NULL);
7162                 if (ret != I40E_SUCCESS)
7163                         PMD_DRV_LOG(ERR,
7164                                     "Failed to remove vlan filter");
7165         }
7166 }
7167
7168 /**
7169  * Find all vlans configured for a specific mac addr;
7170  * fill mv_f with the vlans actually found.
7171  */
7172 int
7173 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7174                            struct i40e_macvlan_filter *mv_f,
7175                            int num, struct rte_ether_addr *addr)
7176 {
7177         int i;
7178         uint32_t j, k;
7179
7180         /**
7181          * Don't use i40e_find_vlan_filter here, to decrease the loop time,
7182          * although the code looks complex.
7183          */
7184         if (num < vsi->vlan_num)
7185                 return I40E_ERR_PARAM;
7186
7187         i = 0;
7188         for (j = 0; j < I40E_VFTA_SIZE; j++) {
7189                 if (vsi->vfta[j]) {
7190                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7191                                 if (vsi->vfta[j] & (1 << k)) {
7192                                         if (i > num - 1) {
7193                                                 PMD_DRV_LOG(ERR,
7194                                                         "vlan number doesn't match");
7195                                                 return I40E_ERR_PARAM;
7196                                         }
7197                                         rte_memcpy(&mv_f[i].macaddr,
7198                                                         addr, ETH_ADDR_LEN);
7199                                         mv_f[i].vlan_id =
7200                                                 j * I40E_UINT32_BIT_SIZE + k;
7201                                         i++;
7202                                 }
7203                         }
7204                 }
7205         }
7206         return I40E_SUCCESS;
7207 }
7208
7209 static inline int
7210 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7211                            struct i40e_macvlan_filter *mv_f,
7212                            int num,
7213                            uint16_t vlan)
7214 {
7215         int i = 0;
7216         struct i40e_mac_filter *f;
7217
7218         if (num < vsi->mac_num)
7219                 return I40E_ERR_PARAM;
7220
7221         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7222                 if (i > num - 1) {
7223                         PMD_DRV_LOG(ERR, "buffer number not match");
7224                         return I40E_ERR_PARAM;
7225                 }
7226                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7227                                 ETH_ADDR_LEN);
7228                 mv_f[i].vlan_id = vlan;
7229                 mv_f[i].filter_type = f->mac_info.filter_type;
7230                 i++;
7231         }
7232
7233         return I40E_SUCCESS;
7234 }
7235
7236 static int
7237 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7238 {
7239         int i, j, num;
7240         struct i40e_mac_filter *f;
7241         struct i40e_macvlan_filter *mv_f;
7242         int ret = I40E_SUCCESS;
7243
7244         if (vsi == NULL || vsi->mac_num == 0)
7245                 return I40E_ERR_PARAM;
7246
7247         /* Case where no vlan is set */
7248         if (vsi->vlan_num == 0)
7249                 num = vsi->mac_num;
7250         else
7251                 num = vsi->mac_num * vsi->vlan_num;
7252
7253         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7254         if (mv_f == NULL) {
7255                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7256                 return I40E_ERR_NO_MEMORY;
7257         }
7258
7259         i = 0;
7260         if (vsi->vlan_num == 0) {
7261                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7262                         rte_memcpy(&mv_f[i].macaddr,
7263                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
7264                         mv_f[i].filter_type = f->mac_info.filter_type;
7265                         mv_f[i].vlan_id = 0;
7266                         i++;
7267                 }
7268         } else {
7269                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7270                         ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
7271                                         vsi->vlan_num, &f->mac_info.mac_addr);
7272                         if (ret != I40E_SUCCESS)
7273                                 goto DONE;
7274                         for (j = i; j < i + vsi->vlan_num; j++)
7275                                 mv_f[j].filter_type = f->mac_info.filter_type;
7276                         i += vsi->vlan_num;
7277                 }
7278         }
7279
7280         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7281 DONE:
7282         rte_free(mv_f);
7283
7284         return ret;
7285 }
7286
7287 int
7288 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7289 {
7290         struct i40e_macvlan_filter *mv_f;
7291         int mac_num;
7292         int ret = I40E_SUCCESS;
7293
7294         if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7295                 return I40E_ERR_PARAM;
7296
7297         /* If it's already set, just return */
7298         if (i40e_find_vlan_filter(vsi, vlan))
7299                 return I40E_SUCCESS;
7300
7301         mac_num = vsi->mac_num;
7302
7303         if (mac_num == 0) {
7304                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7305                 return I40E_ERR_PARAM;
7306         }
7307
7308         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7309
7310         if (mv_f == NULL) {
7311                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7312                 return I40E_ERR_NO_MEMORY;
7313         }
7314
7315         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7316
7317         if (ret != I40E_SUCCESS)
7318                 goto DONE;
7319
7320         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7321
7322         if (ret != I40E_SUCCESS)
7323                 goto DONE;
7324
7325         i40e_set_vlan_filter(vsi, vlan, 1);
7326
7327         vsi->vlan_num++;
7328         ret = I40E_SUCCESS;
7329 DONE:
7330         rte_free(mv_f);
7331         return ret;
7332 }
7333
7334 int
7335 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7336 {
7337         struct i40e_macvlan_filter *mv_f;
7338         int mac_num;
7339         int ret = I40E_SUCCESS;
7340
7341         /**
7342          * Vlan 0 is the generic filter for untagged packets
7343          * and can't be removed.
7344          */
7345         if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
7346                 return I40E_ERR_PARAM;
7347
7348         /* If we can't find it, just return */
7349         if (!i40e_find_vlan_filter(vsi, vlan))
7350                 return I40E_ERR_PARAM;
7351
7352         mac_num = vsi->mac_num;
7353
7354         if (mac_num == 0) {
7355                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7356                 return I40E_ERR_PARAM;
7357         }
7358
7359         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7360
7361         if (mv_f == NULL) {
7362                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7363                 return I40E_ERR_NO_MEMORY;
7364         }
7365
7366         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7367
7368         if (ret != I40E_SUCCESS)
7369                 goto DONE;
7370
7371         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7372
7373         if (ret != I40E_SUCCESS)
7374                 goto DONE;
7375
7376         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
7377         if (vsi->vlan_num == 1) {
7378                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7379                 if (ret != I40E_SUCCESS)
7380                         goto DONE;
7381
7382                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7383                 if (ret != I40E_SUCCESS)
7384                         goto DONE;
7385         }
7386
7387         i40e_set_vlan_filter(vsi, vlan, 0);
7388
7389         vsi->vlan_num--;
7390         ret = I40E_SUCCESS;
7391 DONE:
7392         rte_free(mv_f);
7393         return ret;
7394 }
7395
7396 int
7397 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7398 {
7399         struct i40e_mac_filter *f;
7400         struct i40e_macvlan_filter *mv_f;
7401         int i, vlan_num = 0;
7402         int ret = I40E_SUCCESS;
7403
7404         /* If it's already added and configured, just return */
7405         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7406         if (f != NULL)
7407                 return I40E_SUCCESS;
7408         if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7409                 mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7410
7411                 /**
7412                  * If vlan_num is 0, this is the first time to add a mac;
7413                  * set the mask for vlan_id 0.
7414                  */
7415                 if (vsi->vlan_num == 0) {
7416                         i40e_set_vlan_filter(vsi, 0, 1);
7417                         vsi->vlan_num = 1;
7418                 }
7419                 vlan_num = vsi->vlan_num;
7420         } else if (mac_filter->filter_type == I40E_MAC_PERFECT_MATCH ||
7421                         mac_filter->filter_type == I40E_MAC_HASH_MATCH)
7422                 vlan_num = 1;
7423
7424         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7425         if (mv_f == NULL) {
7426                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7427                 return I40E_ERR_NO_MEMORY;
7428         }
7429
7430         for (i = 0; i < vlan_num; i++) {
7431                 mv_f[i].filter_type = mac_filter->filter_type;
7432                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7433                                 ETH_ADDR_LEN);
7434         }
7435
7436         if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7437                 mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7438                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7439                                         &mac_filter->mac_addr);
7440                 if (ret != I40E_SUCCESS)
7441                         goto DONE;
7442         }
7443
7444         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7445         if (ret != I40E_SUCCESS)
7446                 goto DONE;
7447
7448         /* Add the mac addr to the mac list */
7449         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7450         if (f == NULL) {
7451                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7452                 ret = I40E_ERR_NO_MEMORY;
7453                 goto DONE;
7454         }
7455         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7456                         ETH_ADDR_LEN);
7457         f->mac_info.filter_type = mac_filter->filter_type;
7458         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7459         vsi->mac_num++;
7460
7461         ret = I40E_SUCCESS;
7462 DONE:
7463         rte_free(mv_f);
7464
7465         return ret;
7466 }
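
/*
 * A minimal caller-side sketch for i40e_vsi_add_mac() (hypothetical
 * variables; new_addr is a struct rte_ether_addr supplied by the caller):
 *
 *	struct i40e_mac_filter_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	rte_ether_addr_copy(&new_addr, &info.mac_addr);
 *	info.filter_type = I40E_MACVLAN_PERFECT_MATCH;
 *	if (i40e_vsi_add_mac(vsi, &info) != I40E_SUCCESS)
 *		// handle the error
 *
 * With a VLAN-aware filter type, the MAC is paired with every VLAN already
 * set in vsi->vfta, which is why the function expands it via
 * i40e_find_all_vlan_for_mac() before programming the hardware.
 */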
7467
7468 int
7469 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
7470 {
7471         struct i40e_mac_filter *f;
7472         struct i40e_macvlan_filter *mv_f;
7473         int i, vlan_num;
7474         enum i40e_mac_filter_type filter_type;
7475         int ret = I40E_SUCCESS;
7476
7477         /* If we can't find it, return an error */
7478         f = i40e_find_mac_filter(vsi, addr);
7479         if (f == NULL)
7480                 return I40E_ERR_PARAM;
7481
7482         vlan_num = vsi->vlan_num;
7483         filter_type = f->mac_info.filter_type;
7484         if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7485                 filter_type == I40E_MACVLAN_HASH_MATCH) {
7486                 if (vlan_num == 0) {
7487                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7488                         return I40E_ERR_PARAM;
7489                 }
7490         } else if (filter_type == I40E_MAC_PERFECT_MATCH ||
7491                         filter_type == I40E_MAC_HASH_MATCH)
7492                 vlan_num = 1;
7493
7494         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7495         if (mv_f == NULL) {
7496                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7497                 return I40E_ERR_NO_MEMORY;
7498         }
7499
7500         for (i = 0; i < vlan_num; i++) {
7501                 mv_f[i].filter_type = filter_type;
7502                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7503                                 ETH_ADDR_LEN);
7504         }
7505         if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7506                         filter_type == I40E_MACVLAN_HASH_MATCH) {
7507                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7508                 if (ret != I40E_SUCCESS)
7509                         goto DONE;
7510         }
7511
7512         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7513         if (ret != I40E_SUCCESS)
7514                 goto DONE;
7515
7516         /* Remove the mac addr from the mac list */
7517         TAILQ_REMOVE(&vsi->mac_list, f, next);
7518         rte_free(f);
7519         vsi->mac_num--;
7520
7521         ret = I40E_SUCCESS;
7522 DONE:
7523         rte_free(mv_f);
7524         return ret;
7525 }
7526
7527 /* Configure hash enable flags for RSS */
7528 uint64_t
7529 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7530 {
7531         uint64_t hena = 0;
7532         int i;
7533
7534         if (!flags)
7535                 return hena;
7536
7537         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7538                 if (flags & (1ULL << i))
7539                         hena |= adapter->pctypes_tbl[i];
7540         }
7541
7542         return hena;
7543 }
7544
7545 /* Parse the hash enable flags */
7546 uint64_t
7547 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7548 {
7549         uint64_t rss_hf = 0;
7550         int i;
7551
7552         if (!flags)
7553                 return rss_hf;
7554
7555         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7556                 if (flags & adapter->pctypes_tbl[i])
7557                         rss_hf |= (1ULL << i);
7558         }
7559         return rss_hf;
7560 }
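
/*
 * i40e_config_hena() and i40e_parse_hena() are inverse mappings over
 * adapter->pctypes_tbl: config turns an rte_eth flow-type bitmask (e.g. the
 * bit for RTE_ETH_FLOW_NONFRAG_IPV4_TCP) into the hardware packet-type
 * (PCTYPE) bits written to the HENA registers, and parse converts HENA
 * contents back into the flow-type mask reported to the application.
 */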
7561
7562 /* Disable RSS */
7563 void
7564 i40e_pf_disable_rss(struct i40e_pf *pf)
7565 {
7566         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7567
7568         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7569         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7570         I40E_WRITE_FLUSH(hw);
7571 }
7572
7573 int
7574 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7575 {
7576         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7577         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7578         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7579                            I40E_VFQF_HKEY_MAX_INDEX :
7580                            I40E_PFQF_HKEY_MAX_INDEX;
7581
7582         if (!key || key_len == 0) {
7583                 PMD_DRV_LOG(DEBUG, "No key to be configured");
7584                 return 0;
7585         } else if (key_len != (key_idx + 1) *
7586                 sizeof(uint32_t)) {
7587                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7588                 return -EINVAL;
7589         }
7590
7591         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7592                 struct i40e_aqc_get_set_rss_key_data *key_dw =
7593                                 (struct i40e_aqc_get_set_rss_key_data *)key;
7594                 enum i40e_status_code status =
7595                                 i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7596
7597                 if (status) {
7598                         PMD_DRV_LOG(ERR,
7599                                     "Failed to configure RSS key via AQ, error status: %d",
7600                                     status);
7601                         return -EIO;
7602                 }
7603         } else {
7604                 uint32_t *hash_key = (uint32_t *)key;
7605                 uint16_t i;
7606
7607                 if (vsi->type == I40E_VSI_SRIOV) {
7608                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7609                                 I40E_WRITE_REG(
7610                                         hw,
7611                                         I40E_VFQF_HKEY1(i, vsi->user_param),
7612                                         hash_key[i]);
7613
7614                 } else {
7615                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7616                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7617                                                hash_key[i]);
7618                 }
7619                 I40E_WRITE_FLUSH(hw);
7620         }
7621
7622         return 0;
7623 }
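
/*
 * Key-length sanity: the expected length is (key_idx + 1) * sizeof(uint32_t)
 * bytes. With I40E_PFQF_HKEY_MAX_INDEX == 12 (the usual value for this
 * device family) a PF RSS key is therefore 13 * 4 = 52 bytes.
 */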
7624
7625 static int
7626 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7627 {
7628         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7629         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7630         uint32_t reg;
7631         int ret;
7632
7633         if (!key || !key_len)
7634                 return 0;
7635
7636         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7637                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7638                         (struct i40e_aqc_get_set_rss_key_data *)key);
7639                 if (ret) {
7640                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7641                         return ret;
7642                 }
7643         } else {
7644                 uint32_t *key_dw = (uint32_t *)key;
7645                 uint16_t i;
7646
7647                 if (vsi->type == I40E_VSI_SRIOV) {
7648                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7649                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7650                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7651                         }
7652                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7653                                    sizeof(uint32_t);
7654                 } else {
7655                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7656                                 reg = I40E_PFQF_HKEY(i);
7657                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7658                         }
7659                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7660                                    sizeof(uint32_t);
7661                 }
7662         }
7663         return 0;
7664 }
7665
7666 static int
7667 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7668 {
7669         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7670         uint64_t hena;
7671         int ret;
7672
7673         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7674                                rss_conf->rss_key_len);
7675         if (ret)
7676                 return ret;
7677
7678         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7679         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7680         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7681         I40E_WRITE_FLUSH(hw);
7682
7683         return 0;
7684 }
7685
7686 static int
7687 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7688                          struct rte_eth_rss_conf *rss_conf)
7689 {
7690         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7691         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7692         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7693         uint64_t hena;
7694
7695         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7696         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7697
7698         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7699                 if (rss_hf != 0) /* Enable RSS */
7700                         return -EINVAL;
7701                 return 0; /* Nothing to do */
7702         }
7703         /* RSS enabled */
7704         if (rss_hf == 0) /* Disable RSS */
7705                 return -EINVAL;
7706
7707         return i40e_hw_rss_hash_set(pf, rss_conf);
7708 }
7709
7710 static int
7711 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7712                            struct rte_eth_rss_conf *rss_conf)
7713 {
7714         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7715         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7716         uint64_t hena;
7717         int ret;
7718
7719         if (!rss_conf)
7720                 return -EINVAL;
7721
7722         ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7723                          &rss_conf->rss_key_len);
7724         if (ret)
7725                 return ret;
7726
7727         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7728         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7729         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7730
7731         return 0;
7732 }
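
/*
 * From the application side these two callbacks back the generic ethdev RSS
 * API; a sketch (assuming a 52-byte key as noted above, and a valid port_id):
 *
 *	struct rte_eth_rss_conf rss = { 0 };
 *	uint8_t key[52];
 *
 *	rss.rss_key = key;
 *	rss.rss_key_len = sizeof(key);
 *	if (rte_eth_dev_rss_hash_conf_get(port_id, &rss) == 0) {
 *		rss.rss_hf &= RTE_ETH_RSS_IP;	// e.g. keep only IP hashing
 *		rte_eth_dev_rss_hash_update(port_id, &rss);
 *	}
 *
 * Note that the update path above rejects enabling RSS when it is disabled
 * (and vice versa); mode changes must go through device configuration.
 */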
7733
7734 static int
7735 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7736 {
7737         switch (filter_type) {
7738         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7739                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7740                 break;
7741         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7742                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7743                 break;
7744         case RTE_TUNNEL_FILTER_IMAC_TENID:
7745                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7746                 break;
7747         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7748                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7749                 break;
7750         case ETH_TUNNEL_FILTER_IMAC:
7751                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7752                 break;
7753         case ETH_TUNNEL_FILTER_OIP:
7754                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7755                 break;
7756         case ETH_TUNNEL_FILTER_IIP:
7757                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7758                 break;
7759         default:
7760                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7761                 return -EINVAL;
7762         }
7763
7764         return 0;
7765 }
7766
7767 /* Convert tunnel filter structure */
7768 static int
7769 i40e_tunnel_filter_convert(
7770         struct i40e_aqc_cloud_filters_element_bb *cld_filter,
7771         struct i40e_tunnel_filter *tunnel_filter)
7772 {
7773         rte_ether_addr_copy((struct rte_ether_addr *)
7774                         &cld_filter->element.outer_mac,
7775                 (struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
7776         rte_ether_addr_copy((struct rte_ether_addr *)
7777                         &cld_filter->element.inner_mac,
7778                 (struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
7779         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7780         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7781              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7782             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7783                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7784         else
7785                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7786         tunnel_filter->input.flags = cld_filter->element.flags;
7787         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7788         tunnel_filter->queue = cld_filter->element.queue_number;
7789         rte_memcpy(tunnel_filter->input.general_fields,
7790                    cld_filter->general_fields,
7791                    sizeof(cld_filter->general_fields));
7792
7793         return 0;
7794 }
7795
7796 /* Check if the tunnel filter already exists */
7797 struct i40e_tunnel_filter *
7798 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7799                              const struct i40e_tunnel_filter_input *input)
7800 {
7801         int ret;
7802
7803         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7804         if (ret < 0)
7805                 return NULL;
7806
7807         return tunnel_rule->hash_map[ret];
7808 }
7809
7810 /* Add a tunnel filter into the SW list */
7811 static int
7812 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7813                              struct i40e_tunnel_filter *tunnel_filter)
7814 {
7815         struct i40e_tunnel_rule *rule = &pf->tunnel;
7816         int ret;
7817
7818         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7819         if (ret < 0) {
7820                 PMD_DRV_LOG(ERR,
7821                             "Failed to insert tunnel filter to hash table %d!",
7822                             ret);
7823                 return ret;
7824         }
7825         rule->hash_map[ret] = tunnel_filter;
7826
7827         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7828
7829         return 0;
7830 }
7831
7832 /* Delete a tunnel filter from the SW list */
7833 int
7834 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7835                           struct i40e_tunnel_filter_input *input)
7836 {
7837         struct i40e_tunnel_rule *rule = &pf->tunnel;
7838         struct i40e_tunnel_filter *tunnel_filter;
7839         int ret;
7840
7841         ret = rte_hash_del_key(rule->hash_table, input);
7842         if (ret < 0) {
7843                 PMD_DRV_LOG(ERR,
7844                             "Failed to delete tunnel filter to hash table %d!",
7845                             ret);
7846                 return ret;
7847         }
7848         tunnel_filter = rule->hash_map[ret];
7849         rule->hash_map[ret] = NULL;
7850
7851         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7852         rte_free(tunnel_filter);
7853
7854         return 0;
7855 }
7856
7857 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7858 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7859 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7860 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7861 #define I40E_TR_GRE_KEY_MASK                    0x400
7862 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7863 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7864 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
7865 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
7866 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
7867 #define I40E_DIRECTION_INGRESS_KEY              0x8000
7868 #define I40E_TR_L4_TYPE_TCP                     0x2
7869 #define I40E_TR_L4_TYPE_UDP                     0x4
7870 #define I40E_TR_L4_TYPE_SCTP                    0x8
7871
7872 static enum i40e_status_code
7873 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7874 {
7875         struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7876         struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7877         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7878         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7879         enum i40e_status_code status = I40E_SUCCESS;
7880
7881         if (pf->support_multi_driver) {
7882                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7883                 return I40E_NOT_SUPPORTED;
7884         }
7885
7886         memset(&filter_replace, 0,
7887                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7888         memset(&filter_replace_buf, 0,
7889                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7890
7891         /* create L1 filter */
7892         filter_replace.old_filter_type =
7893                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7894         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7895         filter_replace.tr_bit = 0;
7896
7897         /* Prepare the buffer, 3 entries */
7898         filter_replace_buf.data[0] =
7899                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7900         filter_replace_buf.data[0] |=
7901                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7902         filter_replace_buf.data[2] = 0xFF;
7903         filter_replace_buf.data[3] = 0xFF;
7904         filter_replace_buf.data[4] =
7905                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7906         filter_replace_buf.data[4] |=
7907                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7908         filter_replace_buf.data[7] = 0xF0;
7909         filter_replace_buf.data[8]
7910                 = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7911         filter_replace_buf.data[8] |=
7912                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7913         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7914                 I40E_TR_GENEVE_KEY_MASK |
7915                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7916         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7917                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7918                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7919
7920         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7921                                                &filter_replace_buf);
7922         if (!status && (filter_replace.old_filter_type !=
7923                         filter_replace.new_filter_type))
7924                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
7925                             " original: 0x%x, new: 0x%x",
7926                             dev->device->name,
7927                             filter_replace.old_filter_type,
7928                             filter_replace.new_filter_type);
7929
7930         return status;
7931 }
7932
7933 static enum i40e_status_code
7934 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7935 {
7936         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7937         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7938         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7939         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7940         enum i40e_status_code status = I40E_SUCCESS;
7941
7942         if (pf->support_multi_driver) {
7943                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7944                 return I40E_NOT_SUPPORTED;
7945         }
7946
7947         /* For MPLSoUDP */
7948         memset(&filter_replace, 0,
7949                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7950         memset(&filter_replace_buf, 0,
7951                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7952         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7953                 I40E_AQC_MIRROR_CLOUD_FILTER;
7954         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7955         filter_replace.new_filter_type =
7956                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7957         /* Prepare the buffer, 2 entries */
7958         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7959         filter_replace_buf.data[0] |=
7960                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7961         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7962         filter_replace_buf.data[4] |=
7963                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7964         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7965                                                &filter_replace_buf);
7966         if (status < 0)
7967                 return status;
7968         if (filter_replace.old_filter_type !=
7969             filter_replace.new_filter_type)
7970                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7971                             " original: 0x%x, new: 0x%x",
7972                             dev->device->name,
7973                             filter_replace.old_filter_type,
7974                             filter_replace.new_filter_type);
7975
7976         /* For MPLSoGRE */
7977         memset(&filter_replace, 0,
7978                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7979         memset(&filter_replace_buf, 0,
7980                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7981
7982         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7983                 I40E_AQC_MIRROR_CLOUD_FILTER;
7984         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7985         filter_replace.new_filter_type =
7986                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7987         /* Prepare the buffer, 2 entries */
7988         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7989         filter_replace_buf.data[0] |=
7990                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7991         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7992         filter_replace_buf.data[4] |=
7993                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7994
7995         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7996                                                &filter_replace_buf);
7997         if (!status && (filter_replace.old_filter_type !=
7998                         filter_replace.new_filter_type))
7999                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8000                             " original: 0x%x, new: 0x%x",
8001                             dev->device->name,
8002                             filter_replace.old_filter_type,
8003                             filter_replace.new_filter_type);
8004
8005         return status;
8006 }
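
/* Usage sketch (illustrative): both MPLS replace helpers are meant to
 * run exactly once per port, before the first MPLSoUDP/MPLSoGRE filter
 * is programmed; the driver guards this with pf->mpls_replace_flag:
 *
 *   if (!pf->mpls_replace_flag) {
 *       i40e_replace_mpls_l1_filter(pf);
 *       i40e_replace_mpls_cloud_filter(pf);
 *       pf->mpls_replace_flag = 1;
 *   }
 *
 * The same pattern is used verbatim in
 * i40e_dev_consistent_tunnel_filter_set() below.
 */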
8007
8008 static enum i40e_status_code
8009 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
8010 {
8011         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8012         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8013         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8014         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8015         enum i40e_status_code status = I40E_SUCCESS;
8016
8017         if (pf->support_multi_driver) {
8018                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8019                 return I40E_NOT_SUPPORTED;
8020         }
8021
8022         /* For GTP-C */
8023         memset(&filter_replace, 0,
8024                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8025         memset(&filter_replace_buf, 0,
8026                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8027         /* create L1 filter */
8028         filter_replace.old_filter_type =
8029                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8030         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
8031         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
8032                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8033         /* Prepare the buffer, 2 entries */
8034         filter_replace_buf.data[0] =
8035                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8036         filter_replace_buf.data[0] |=
8037                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8038         filter_replace_buf.data[2] = 0xFF;
8039         filter_replace_buf.data[3] = 0xFF;
8040         filter_replace_buf.data[4] =
8041                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8042         filter_replace_buf.data[4] |=
8043                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8044         filter_replace_buf.data[6] = 0xFF;
8045         filter_replace_buf.data[7] = 0xFF;
8046         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8047                                                &filter_replace_buf);
8048         if (status < 0)
8049                 return status;
8050         if (filter_replace.old_filter_type !=
8051             filter_replace.new_filter_type)
8052                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8053                             " original: 0x%x, new: 0x%x",
8054                             dev->device->name,
8055                             filter_replace.old_filter_type,
8056                             filter_replace.new_filter_type);
8057
8058         /* for GTP-U */
8059         memset(&filter_replace, 0,
8060                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8061         memset(&filter_replace_buf, 0,
8062                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8063         /* create L1 filter */
8064         filter_replace.old_filter_type =
8065                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8066         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
8067         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
8068                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8069         /* Prepare the buffer, 2 entries */
8070         filter_replace_buf.data[0] =
8071                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8072         filter_replace_buf.data[0] |=
8073                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8074         filter_replace_buf.data[2] = 0xFF;
8075         filter_replace_buf.data[3] = 0xFF;
8076         filter_replace_buf.data[4] =
8077                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8078         filter_replace_buf.data[4] |=
8079                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8080         filter_replace_buf.data[6] = 0xFF;
8081         filter_replace_buf.data[7] = 0xFF;
8082
8083         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8084                                                &filter_replace_buf);
8085         if (!status && (filter_replace.old_filter_type !=
8086                         filter_replace.new_filter_type))
8087                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8088                             " original: 0x%x, new: 0x%x",
8089                             dev->device->name,
8090                             filter_replace.old_filter_type,
8091                             filter_replace.new_filter_type);
8092
8093         return status;
8094 }
8095
8096 static enum i40e_status_code
8097 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
8098 {
8099         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8100         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8101         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8102         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8103         enum i40e_status_code status = I40E_SUCCESS;
8104
8105         if (pf->support_multi_driver) {
8106                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8107                 return I40E_NOT_SUPPORTED;
8108         }
8109
8110         /* for GTP-C */
8111         memset(&filter_replace, 0,
8112                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8113         memset(&filter_replace_buf, 0,
8114                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8115         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8116         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
8117         filter_replace.new_filter_type =
8118                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8119         /* Prepare the buffer, 2 entries */
8120         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
8121         filter_replace_buf.data[0] |=
8122                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8123         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8124         filter_replace_buf.data[4] |=
8125                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8126         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8127                                                &filter_replace_buf);
8128         if (status < 0)
8129                 return status;
8130         if (filter_replace.old_filter_type !=
8131             filter_replace.new_filter_type)
8132                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8133                             " original: 0x%x, new: 0x%x",
8134                             dev->device->name,
8135                             filter_replace.old_filter_type,
8136                             filter_replace.new_filter_type);
8137
8138         /* for GTP-U */
8139         memset(&filter_replace, 0,
8140                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8141         memset(&filter_replace_buf, 0,
8142                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8143         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8144         filter_replace.old_filter_type =
8145                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
8146         filter_replace.new_filter_type =
8147                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8148         /* Prepare the buffer, 2 entries */
8149         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
8150         filter_replace_buf.data[0] |=
8151                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8152         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8153         filter_replace_buf.data[4] |=
8154                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8155
8156         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8157                                                &filter_replace_buf);
8158         if (!status && (filter_replace.old_filter_type !=
8159                         filter_replace.new_filter_type))
8160                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8161                             " original: 0x%x, new: 0x%x",
8162                             dev->device->name,
8163                             filter_replace.old_filter_type,
8164                             filter_replace.new_filter_type);
8165
8166         return status;
8167 }
8168
8169 static enum i40e_status_code
8170 i40e_replace_port_l1_filter(struct i40e_pf *pf,
8171                             enum i40e_l4_port_type l4_port_type)
8172 {
8173         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8174         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8175         enum i40e_status_code status = I40E_SUCCESS;
8176         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8177         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8178
8179         if (pf->support_multi_driver) {
8180                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8181                 return I40E_NOT_SUPPORTED;
8182         }
8183
8184         memset(&filter_replace, 0,
8185                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8186         memset(&filter_replace_buf, 0,
8187                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8188
8189         /* create L1 filter */
8190         if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8191                 filter_replace.old_filter_type =
8192                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8193                 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8194                 filter_replace_buf.data[8] =
8195                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
8196         } else {
8197                 filter_replace.old_filter_type =
8198                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
8199                 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
8200                 filter_replace_buf.data[8] =
8201                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
8202         }
8203
8204         filter_replace.tr_bit = 0;
8205         /* Prepare the buffer, 3 entries */
8206         filter_replace_buf.data[0] =
8207                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
8208         filter_replace_buf.data[0] |=
8209                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8210         filter_replace_buf.data[2] = 0x00;
8211         filter_replace_buf.data[3] =
8212                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
8213         filter_replace_buf.data[4] =
8214                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
8215         filter_replace_buf.data[4] |=
8216                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8217         filter_replace_buf.data[5] = 0x00;
8218         filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
8219                 I40E_TR_L4_TYPE_TCP |
8220                 I40E_TR_L4_TYPE_SCTP;
8221         filter_replace_buf.data[7] = 0x00;
8222         filter_replace_buf.data[8] |=
8223                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8224         filter_replace_buf.data[9] = 0x00;
8225         filter_replace_buf.data[10] = 0xFF;
8226         filter_replace_buf.data[11] = 0xFF;
8227
8228         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8229                                                &filter_replace_buf);
8230         if (!status && filter_replace.old_filter_type !=
8231             filter_replace.new_filter_type)
8232                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8233                             " original: 0x%x, new: 0x%x",
8234                             dev->device->name,
8235                             filter_replace.old_filter_type,
8236                             filter_replace.new_filter_type);
8237
8238         return status;
8239 }
8240
8241 static enum i40e_status_code
8242 i40e_replace_port_cloud_filter(struct i40e_pf *pf,
8243                                enum i40e_l4_port_type l4_port_type)
8244 {
8245         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8246         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8247         enum i40e_status_code status = I40E_SUCCESS;
8248         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8249         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8250
8251         if (pf->support_multi_driver) {
8252                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8253                 return I40E_NOT_SUPPORTED;
8254         }
8255
8256         memset(&filter_replace, 0,
8257                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8258         memset(&filter_replace_buf, 0,
8259                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8260
8261         if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8262                 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8263                 filter_replace.new_filter_type =
8264                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8265                 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
8266         } else {
8267                 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
8268                 filter_replace.new_filter_type =
8269                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
8270                 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
8271         }
8272
8273         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8274         filter_replace.tr_bit = 0;
8275         /* Prepare the buffer, 2 entries */
8276         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8277         filter_replace_buf.data[0] |=
8278                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8279         filter_replace_buf.data[4] |=
8280                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8281         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8282                                                &filter_replace_buf);
8283
8284         if (!status && filter_replace.old_filter_type !=
8285             filter_replace.new_filter_type)
8286                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8287                             " original: 0x%x, new: 0x%x",
8288                             dev->device->name,
8289                             filter_replace.old_filter_type,
8290                             filter_replace.new_filter_type);
8291
8292         return status;
8293 }
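
/* Summary sketch (illustrative): the two helpers above select hardware
 * filter types by L4 port direction:
 *
 *   SRC port: L1 0x11 replaces FV_TUNNLE_KEY, cloud 0x11 replaces IIP
 *   DST port: L1 0x10 replaces FV_STAG_IVLAN, cloud 0x10 replaces OIP
 *
 * A caller therefore invokes both with the same l4_port_type, e.g.:
 *
 *   i40e_replace_port_l1_filter(pf, I40E_L4_PORT_TYPE_SRC);
 *   i40e_replace_port_cloud_filter(pf, I40E_L4_PORT_TYPE_SRC);
 */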
8294
8295 int
8296 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
8297                       struct i40e_tunnel_filter_conf *tunnel_filter,
8298                       uint8_t add)
8299 {
8300         uint16_t ip_type;
8301         uint32_t ipv4_addr, ipv4_addr_le;
8302         uint8_t i, tun_type = 0;
8303         /* temporary buffer to convert the IPv6 address byte order */
8304         uint32_t convert_ipv6[4];
8305         int val, ret = 0;
8306         struct i40e_pf_vf *vf = NULL;
8307         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8308         struct i40e_vsi *vsi;
8309         struct i40e_aqc_cloud_filters_element_bb *cld_filter;
8310         struct i40e_aqc_cloud_filters_element_bb *pfilter;
8311         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
8312         struct i40e_tunnel_filter *tunnel, *node;
8313         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
8314         uint32_t teid_le;
8315         bool big_buffer = 0;
8316
8317         cld_filter = rte_zmalloc("tunnel_filter",
8318                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8319                          0);
8320
8321         if (cld_filter == NULL) {
8322                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8323                 return -ENOMEM;
8324         }
8325         pfilter = cld_filter;
8326
8327         rte_ether_addr_copy(&tunnel_filter->outer_mac,
8328                         (struct rte_ether_addr *)&pfilter->element.outer_mac);
8329         rte_ether_addr_copy(&tunnel_filter->inner_mac,
8330                         (struct rte_ether_addr *)&pfilter->element.inner_mac);
8331
8332         pfilter->element.inner_vlan =
8333                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8334         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
8335                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8336                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8337                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8338                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
8339                                 &ipv4_addr_le,
8340                                 sizeof(pfilter->element.ipaddr.v4.data));
8341         } else {
8342                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8343                 for (i = 0; i < 4; i++) {
8344                         convert_ipv6[i] =
8345                         rte_cpu_to_le_32(rte_be_to_cpu_32(
8346                                          tunnel_filter->ip_addr.ipv6_addr[i]));
8347                 }
8348                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
8349                            &convert_ipv6,
8350                            sizeof(pfilter->element.ipaddr.v6.data));
8351         }
8352
8353         /* check tunneled type */
8354         switch (tunnel_filter->tunnel_type) {
8355         case I40E_TUNNEL_TYPE_VXLAN:
8356                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8357                 break;
8358         case I40E_TUNNEL_TYPE_NVGRE:
8359                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8360                 break;
8361         case I40E_TUNNEL_TYPE_IP_IN_GRE:
8362                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8363                 break;
8364         case I40E_TUNNEL_TYPE_MPLSoUDP:
8365                 if (!pf->mpls_replace_flag) {
8366                         i40e_replace_mpls_l1_filter(pf);
8367                         i40e_replace_mpls_cloud_filter(pf);
8368                         pf->mpls_replace_flag = 1;
8369                 }
8370                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8371                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8372                         teid_le >> 4;
8373                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8374                         (teid_le & 0xF) << 12;
8375                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8376                         0x40;
8377                 big_buffer = 1;
8378                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
8379                 break;
8380         case I40E_TUNNEL_TYPE_MPLSoGRE:
8381                 if (!pf->mpls_replace_flag) {
8382                         i40e_replace_mpls_l1_filter(pf);
8383                         i40e_replace_mpls_cloud_filter(pf);
8384                         pf->mpls_replace_flag = 1;
8385                 }
8386                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8387                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8388                         teid_le >> 4;
8389                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8390                         (teid_le & 0xF) << 12;
8391                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8392                         0x0;
8393                 big_buffer = 1;
8394                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
8395                 break;
8396         case I40E_TUNNEL_TYPE_GTPC:
8397                 if (!pf->gtp_replace_flag) {
8398                         i40e_replace_gtp_l1_filter(pf);
8399                         i40e_replace_gtp_cloud_filter(pf);
8400                         pf->gtp_replace_flag = 1;
8401                 }
8402                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8403                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
8404                         (teid_le >> 16) & 0xFFFF;
8405                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
8406                         teid_le & 0xFFFF;
8407                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
8408                         0x0;
8409                 big_buffer = 1;
8410                 break;
8411         case I40E_TUNNEL_TYPE_GTPU:
8412                 if (!pf->gtp_replace_flag) {
8413                         i40e_replace_gtp_l1_filter(pf);
8414                         i40e_replace_gtp_cloud_filter(pf);
8415                         pf->gtp_replace_flag = 1;
8416                 }
8417                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8418                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
8419                         (teid_le >> 16) & 0xFFFF;
8420                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
8421                         teid_le & 0xFFFF;
8422                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
8423                         0x0;
8424                 big_buffer = 1;
8425                 break;
8426         case I40E_TUNNEL_TYPE_QINQ:
8427                 if (!pf->qinq_replace_flag) {
8428                         ret = i40e_cloud_filter_qinq_create(pf);
8429                         if (ret < 0)
8430                                 PMD_DRV_LOG(DEBUG,
8431                                             "QinQ tunnel filter already created.");
8432                         pf->qinq_replace_flag = 1;
8433                 }
8434                 /* Add the outer and inner VLAN values to the
8435                  * general fields. The big-buffer command variant
8436                  * must be used; see the handling in
8437                  * i40e_aq_add_cloud_filters().
8438                  */
8439                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
8440                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
8441                 big_buffer = 1;
8442                 break;
8443         case I40E_CLOUD_TYPE_UDP:
8444         case I40E_CLOUD_TYPE_TCP:
8445         case I40E_CLOUD_TYPE_SCTP:
8446                 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8447                         if (!pf->sport_replace_flag) {
8448                                 i40e_replace_port_l1_filter(pf,
8449                                                 tunnel_filter->l4_port_type);
8450                                 i40e_replace_port_cloud_filter(pf,
8451                                                 tunnel_filter->l4_port_type);
8452                                 pf->sport_replace_flag = 1;
8453                         }
8454                         teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8455                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8456                                 I40E_DIRECTION_INGRESS_KEY;
8457
8458                         if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8459                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8460                                         I40E_TR_L4_TYPE_UDP;
8461                         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8462                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8463                                         I40E_TR_L4_TYPE_TCP;
8464                         else
8465                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8466                                         I40E_TR_L4_TYPE_SCTP;
8467
8468                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8469                                 (teid_le >> 16) & 0xFFFF;
8470                         big_buffer = 1;
8471                 } else {
8472                         if (!pf->dport_replace_flag) {
8473                                 i40e_replace_port_l1_filter(pf,
8474                                                 tunnel_filter->l4_port_type);
8475                                 i40e_replace_port_cloud_filter(pf,
8476                                                 tunnel_filter->l4_port_type);
8477                                 pf->dport_replace_flag = 1;
8478                         }
8479                         teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8480                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
8481                                 I40E_DIRECTION_INGRESS_KEY;
8482
8483                         if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8484                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8485                                         I40E_TR_L4_TYPE_UDP;
8486                         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8487                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8488                                         I40E_TR_L4_TYPE_TCP;
8489                         else
8490                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8491                                         I40E_TR_L4_TYPE_SCTP;
8492
8493                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
8494                                 (teid_le >> 16) & 0xFFFF;
8495                         big_buffer = 1;
8496                 }
8497
8498                 break;
8499         default:
8500                 /* Other tunnel types are not supported. */
8501                 PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
8502                 rte_free(cld_filter);
8503                 return -EINVAL;
8504         }
8505
8506         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
8507                 pfilter->element.flags =
8508                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8509         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
8510                 pfilter->element.flags =
8511                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
8512         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
8513                 pfilter->element.flags =
8514                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8515         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
8516                 pfilter->element.flags =
8517                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
8518         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
8519                 pfilter->element.flags |=
8520                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
8521         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
8522                  tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
8523                  tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
8524                 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
8525                         pfilter->element.flags |=
8526                                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8527                 else
8528                         pfilter->element.flags |=
8529                                 I40E_AQC_ADD_CLOUD_FILTER_0X10;
8530         } else {
8531                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8532                                                 &pfilter->element.flags);
8533                 if (val < 0) {
8534                         rte_free(cld_filter);
8535                         return -EINVAL;
8536                 }
8537         }
8538
8539         pfilter->element.flags |= rte_cpu_to_le_16(
8540                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8541                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8542         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8543         pfilter->element.queue_number =
8544                 rte_cpu_to_le_16(tunnel_filter->queue_id);
8545
8546         if (!tunnel_filter->is_to_vf)
8547                 vsi = pf->main_vsi;
8548         else {
8549                 if (tunnel_filter->vf_id >= pf->vf_num) {
8550                         PMD_DRV_LOG(ERR, "Invalid argument.");
8551                         rte_free(cld_filter);
8552                         return -EINVAL;
8553                 }
8554                 vf = &pf->vfs[tunnel_filter->vf_id];
8555                 vsi = vf->vsi;
8556         }
8557
8558         /* Check if the filter already exists in the SW list */
8559         memset(&check_filter, 0, sizeof(check_filter));
8560         i40e_tunnel_filter_convert(cld_filter, &check_filter);
8561         check_filter.is_to_vf = tunnel_filter->is_to_vf;
8562         check_filter.vf_id = tunnel_filter->vf_id;
8563         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8564         if (add && node) {
8565                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8566                 rte_free(cld_filter);
8567                 return -EINVAL;
8568         }
8569
8570         if (!add && !node) {
8571                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8572                 rte_free(cld_filter);
8573                 return -EINVAL;
8574         }
8575
8576         if (add) {
8577                 if (big_buffer)
8578                         ret = i40e_aq_add_cloud_filters_bb(hw,
8579                                                    vsi->seid, cld_filter, 1);
8580                 else
8581                         ret = i40e_aq_add_cloud_filters(hw,
8582                                         vsi->seid, &cld_filter->element, 1);
8583                 if (ret < 0) {
8584                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8585                         rte_free(cld_filter);
8586                         return -ENOTSUP;
8587                 }
8588                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8589                 if (tunnel == NULL) {
8590                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8591                         rte_free(cld_filter);
8592                         return -ENOMEM;
8593                 }
8594
8595                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8596                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8597                 if (ret < 0)
8598                         rte_free(tunnel);
8599         } else {
8600                 if (big_buffer)
8601                         ret = i40e_aq_rem_cloud_filters_bb(
8602                                 hw, vsi->seid, cld_filter, 1);
8603                 else
8604                         ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8605                                                 &cld_filter->element, 1);
8606                 if (ret < 0) {
8607                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8608                         rte_free(cld_filter);
8609                         return -ENOTSUP;
8610                 }
8611                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8612         }
8613
8614         rte_free(cld_filter);
8615         return ret;
8616 }
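
/* Usage sketch (illustrative, hypothetical values): add a VXLAN tunnel
 * filter steering VNI 100 to PF queue 2. Only fields consumed above are
 * filled in; "outer_dst_ip" is a hypothetical big-endian IPv4 address.
 *
 *   struct i40e_tunnel_filter_conf conf;
 *
 *   memset(&conf, 0, sizeof(conf));
 *   conf.tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
 *   conf.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
 *   conf.ip_addr.ipv4_addr = outer_dst_ip;
 *   conf.tenant_id = 100;
 *   conf.queue_id = 2;
 *   conf.is_to_vf = 0;
 *   ret = i40e_dev_consistent_tunnel_filter_set(pf, &conf, 1);
 *
 * Passing 0 instead of 1 as the last argument removes the same filter.
 */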
8617
8618 static int
8619 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8620 {
8621         uint8_t i;
8622
8623         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8624                 if (pf->vxlan_ports[i] == port)
8625                         return i;
8626         }
8627
8628         return -1;
8629 }
8630
8631 static int
8632 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
8633 {
8634         int  idx, ret;
8635         uint8_t filter_idx = 0;
8636         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8637
8638         idx = i40e_get_vxlan_port_idx(pf, port);
8639
8640         /* Check if port already exists */
8641         if (idx >= 0) {
8642                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8643                 return -EINVAL;
8644         }
8645
8646         /* Now check if there is space to add the new port */
8647         idx = i40e_get_vxlan_port_idx(pf, 0);
8648         if (idx < 0) {
8649                 PMD_DRV_LOG(ERR,
8650                         "Maximum number of UDP ports reached, not adding port %d",
8651                         port);
8652                 return -ENOSPC;
8653         }
8654
8655         ret = i40e_aq_add_udp_tunnel(hw, port, udp_type,
8656                                         &filter_idx, NULL);
8657         if (ret < 0) {
8658                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8659                 return -1;
8660         }
8661
8662         PMD_DRV_LOG(INFO, "Added UDP port %d via AQ command, filter index %d",
8663                     port, filter_idx);
8664
8665         /* New port: add it and mark its index in the bitmap */
8666         pf->vxlan_ports[idx] = port;
8667         pf->vxlan_bitmap |= (1 << idx);
8668
8669         if (!(pf->flags & I40E_FLAG_VXLAN))
8670                 pf->flags |= I40E_FLAG_VXLAN;
8671
8672         return 0;
8673 }
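
/* Worked example (illustrative): with vxlan_ports = {4789, 0, ...} and
 * vxlan_bitmap = 0x1, adding port 4790 finds free slot 1 (the first
 * entry holding 0), leaving:
 *
 *   pf->vxlan_ports  = {4789, 4790, 0, ...}
 *   pf->vxlan_bitmap = 0x3
 *
 * A port value of 0 marks a free slot, so port 0 itself can never be
 * offloaded.
 */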
8674
8675 static int
8676 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8677 {
8678         int idx;
8679         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8680
8681         if (!(pf->flags & I40E_FLAG_VXLAN)) {
8682                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8683                 return -EINVAL;
8684         }
8685
8686         idx = i40e_get_vxlan_port_idx(pf, port);
8687
8688         if (idx < 0) {
8689                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8690                 return -EINVAL;
8691         }
8692
8693         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8694                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8695                 return -1;
8696         }
8697
8698         PMD_DRV_LOG(INFO, "Deleted UDP port %d via AQ command, filter index %d",
8699                     port, idx);
8700
8701         pf->vxlan_ports[idx] = 0;
8702         pf->vxlan_bitmap &= ~(1 << idx);
8703
8704         if (!pf->vxlan_bitmap)
8705                 pf->flags &= ~I40E_FLAG_VXLAN;
8706
8707         return 0;
8708 }
8709
8710 /* Add UDP tunneling port */
8711 static int
8712 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8713                              struct rte_eth_udp_tunnel *udp_tunnel)
8714 {
8715         int ret = 0;
8716         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8717
8718         if (udp_tunnel == NULL)
8719                 return -EINVAL;
8720
8721         switch (udp_tunnel->prot_type) {
8722         case RTE_TUNNEL_TYPE_VXLAN:
8723                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8724                                           I40E_AQC_TUNNEL_TYPE_VXLAN);
8725                 break;
8726         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8727                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8728                                           I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
8729                 break;
8730         case RTE_TUNNEL_TYPE_GENEVE:
8731         case RTE_TUNNEL_TYPE_TEREDO:
8732                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8733                 ret = -1;
8734                 break;
8735
8736         default:
8737                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8738                 ret = -1;
8739                 break;
8740         }
8741
8742         return ret;
8743 }
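
/* Application-level sketch (illustrative): this callback is reached
 * through the standard ethdev API, with "port_id" being a hypothetical
 * port owned by this PMD (4789 is the IANA-assigned VXLAN port):
 *
 *   struct rte_eth_udp_tunnel tunnel = {
 *       .udp_port = 4789,
 *       .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *   };
 *   ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */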
8744
8745 /* Remove UDP tunneling port */
8746 static int
8747 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8748                              struct rte_eth_udp_tunnel *udp_tunnel)
8749 {
8750         int ret = 0;
8751         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8752
8753         if (udp_tunnel == NULL)
8754                 return -EINVAL;
8755
8756         switch (udp_tunnel->prot_type) {
8757         case RTE_TUNNEL_TYPE_VXLAN:
8758         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8759                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8760                 break;
8761         case RTE_TUNNEL_TYPE_GENEVE:
8762         case RTE_TUNNEL_TYPE_TEREDO:
8763                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8764                 ret = -1;
8765                 break;
8766         default:
8767                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8768                 ret = -1;
8769                 break;
8770         }
8771
8772         return ret;
8773 }
8774
8775 /* Calculate the maximum number of contiguous PF queues that are configured */
8776 int
8777 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8778 {
8779         struct rte_eth_dev_data *data = pf->dev_data;
8780         int i, num;
8781         struct i40e_rx_queue *rxq;
8782
8783         num = 0;
8784         for (i = 0; i < pf->lan_nb_qps; i++) {
8785                 rxq = data->rx_queues[i];
8786                 if (rxq && rxq->q_set)
8787                         num++;
8788                 else
8789                         break;
8790         }
8791
8792         return num;
8793 }
8794
8795 /* Reset the global configure of hash function and input sets */
8796 static void
8797 i40e_pf_global_rss_reset(struct i40e_pf *pf)
8798 {
8799         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8800         uint32_t reg, reg_val;
8801         int i;
8802
8803         /* Reset global RSS function sets */
8804         reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8805         if (!(reg_val & I40E_GLQF_CTL_HTOEP_MASK)) {
8806                 reg_val |= I40E_GLQF_CTL_HTOEP_MASK;
8807                 i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg_val);
8808         }
8809
8810         for (i = 0; i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) {
8811                 uint64_t inset;
8812                 int j, pctype;
8813
8814                 if (hw->mac.type == I40E_MAC_X722)
8815                         pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(i));
8816                 else
8817                         pctype = i;
8818
8819                 /* Reset pctype insets */
8820                 inset = i40e_get_default_input_set(i);
8821                 if (inset) {
8822                         pf->hash_input_set[pctype] = inset;
8823                         inset = i40e_translate_input_set_reg(hw->mac.type,
8824                                                              inset);
8825
8826                         reg = I40E_GLQF_HASH_INSET(0, pctype);
8827                         i40e_check_write_global_reg(hw, reg, (uint32_t)inset);
8828                         reg = I40E_GLQF_HASH_INSET(1, pctype);
8829                         i40e_check_write_global_reg(hw, reg,
8830                                                     (uint32_t)(inset >> 32));
8831
8832                         /* Clear unused mask registers of the pctype */
8833                         for (j = 0; j < I40E_INSET_MASK_NUM_REG; j++) {
8834                                 reg = I40E_GLQF_HASH_MSK(j, pctype);
8835                                 i40e_check_write_global_reg(hw, reg, 0);
8836                         }
8837                 }
8838
8839                 /* Reset pctype symmetric sets */
8840                 reg = I40E_GLQF_HSYM(pctype);
8841                 reg_val = i40e_read_rx_ctl(hw, reg);
8842                 if (reg_val & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8843                         reg_val &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
8844                         i40e_write_global_rx_ctl(hw, reg, reg_val);
8845                 }
8846         }
8847         I40E_WRITE_FLUSH(hw);
8848 }
8849
8850 int
8851 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
8852 {
8853         struct i40e_hw *hw = &pf->adapter->hw;
8854         uint8_t lut[ETH_RSS_RETA_SIZE_512];
8855         uint32_t i;
8856         int num;
8857
8858         /* If both VMDQ and RSS are enabled, not all PF queues
8859          * are configured. Calculate the actual number of
8860          * configured PF queues.
8861          */
8862         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8863                 num = i40e_pf_calc_configured_queues_num(pf);
8864         else
8865                 num = pf->dev_data->nb_rx_queues;
8866
8867         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8868         if (num <= 0)
8869                 return 0;
8870
8871         for (i = 0; i < hw->func_caps.rss_table_size; i++)
8872                 lut[i] = (uint8_t)(i % (uint32_t)num);
8873
8874         return i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
8875 }
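
/* Worked example (illustrative): with num == 4 configured queues the
 * loop above fills the redirection table round-robin over all
 * hw->func_caps.rss_table_size entries:
 *
 *   lut[] = {0, 1, 2, 3, 0, 1, 2, 3, ...}
 *
 * so hash results spread evenly across queues 0-3.
 */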
8876
8877 int
8878 i40e_pf_reset_rss_key(struct i40e_pf *pf)
8879 {
8880         const uint8_t key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8881                         sizeof(uint32_t);
8882         uint8_t *rss_key;
8883
8884         /* Reset key */
8885         rss_key = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key;
8886         if (!rss_key ||
8887             pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key_len < key_len) {
8888                 static uint32_t rss_key_default[] = {0x6b793944,
8889                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8890                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8891                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8892
8893                 rss_key = (uint8_t *)rss_key_default;
8894         }
8895
8896         return i40e_set_rss_key(pf->main_vsi, rss_key, key_len);
8897 }
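
/* Usage sketch (illustrative): an application may supply its own key
 * via the standard ethdev configuration; the code above prefers it over
 * rss_key_default whenever it is at least key_len bytes, i.e.
 * (I40E_PFQF_HKEY_MAX_INDEX + 1) * 4 bytes (52 assuming the usual HKEY
 * max index of 12). "app_key" is a hypothetical application buffer:
 *
 *   static uint8_t app_key[52] = { ... };
 *   struct rte_eth_conf conf = {0};
 *
 *   conf.rx_adv_conf.rss_conf.rss_key = app_key;
 *   conf.rx_adv_conf.rss_conf.rss_key_len = sizeof(app_key);
 */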
8898
8899 static int
8900 i40e_pf_rss_reset(struct i40e_pf *pf)
8901 {
8902         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8903
8904         int ret;
8905
8906         pf->hash_filter_enabled = 0;
8907         i40e_pf_disable_rss(pf);
8908         i40e_set_symmetric_hash_enable_per_port(hw, 0);
8909
8910         if (!pf->support_multi_driver)
8911                 i40e_pf_global_rss_reset(pf);
8912
8913         /* Reset RETA table */
8914         if (pf->adapter->rss_reta_updated == 0) {
8915                 ret = i40e_pf_reset_rss_reta(pf);
8916                 if (ret)
8917                         return ret;
8918         }
8919
8920         return i40e_pf_reset_rss_key(pf);
8921 }
8922
8923 /* Configure RSS */
8924 int
8925 i40e_pf_config_rss(struct i40e_pf *pf)
8926 {
8927         struct i40e_hw *hw;
8928         enum rte_eth_rx_mq_mode mq_mode;
8929         uint64_t rss_hf, hena;
8930         int ret;
8931
8932         ret = i40e_pf_rss_reset(pf);
8933         if (ret) {
8934                 PMD_DRV_LOG(ERR, "Reset RSS failed, RSS has been disabled");
8935                 return ret;
8936         }
8937
8938         rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
8939         mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8940         if (!(rss_hf & pf->adapter->flow_types_mask) ||
8941             !(mq_mode & ETH_MQ_RX_RSS_FLAG))
8942                 return 0;
8943
8944         hw = I40E_PF_TO_HW(pf);
8945         hena = i40e_config_hena(pf->adapter, rss_hf);
8946         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
8947         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
8948         I40E_WRITE_FLUSH(hw);
8949
8950         return 0;
8951 }
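
/* Configuration sketch (illustrative): the HENA registers above are only
 * programmed when the application actually requested RSS, e.g.:
 *
 *   struct rte_eth_conf conf = {0};
 *
 *   conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *   conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
 *                                      ETH_RSS_TCP;
 *
 * rss_hf is intersected with adapter->flow_types_mask above, so if no
 * requested flow type is supported the function returns with RSS left
 * disabled.
 */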
8952
8953 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8954 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
8955 int
8956 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8957 {
8958         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8959         uint32_t val, reg;
8960         int ret = -EINVAL;
8961
8962         if (pf->support_multi_driver) {
8963                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8964                 return -ENOTSUP;
8965         }
8966
8967         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8968         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM: 0x%08x", val);
8969
8970         if (len == 3) {
8971                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8972         } else if (len == 4) {
8973                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8974         } else {
8975                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8976                 return ret;
8977         }
8978
8979         if (reg != val) {
8980                 ret = i40e_aq_debug_write_global_register(hw,
8981                                                    I40E_GL_PRS_FVBM(2),
8982                                                    reg, NULL);
8983                 if (ret != 0)
8984                         return ret;
8985                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x changed "
8986                             "to value 0x%08x",
8987                             I40E_GL_PRS_FVBM(2), reg);
8988         } else {
8989                 ret = 0;
8990         }
8991         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM: 0x%08x",
8992                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8993
8994         return ret;
8995 }
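
/* Usage sketch (illustrative): callers choose a 3- or 4-byte GRE key
 * length; any other value is rejected, and multi-driver mode refuses
 * the global register write entirely:
 *
 *   ret = i40e_dev_set_gre_key_len(hw, 4);
 */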
8996
8997 /* Set the symmetric hash enable configurations per port */
8998 void
8999 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
9000 {
9001         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9002
9003         if (enable > 0) {
9004                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)
9005                         return;
9006
9007                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9008         } else {
9009                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK))
9010                         return;
9011
9012                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9013         }
9014         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
9015         I40E_WRITE_FLUSH(hw);
9016 }
9017
9018 /**
9019  * Valid input sets for hash and flow director filters per PCTYPE
9020  */
9021 static uint64_t
9022 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
9023                 enum rte_filter_type filter)
9024 {
9025         uint64_t valid;
9026
9027         static const uint64_t valid_hash_inset_table[] = {
9028                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9029                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9030                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9031                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
9032                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
9033                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9034                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9035                         I40E_INSET_FLEX_PAYLOAD,
9036                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9037                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9038                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9039                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9040                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9041                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9042                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9043                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9044                         I40E_INSET_FLEX_PAYLOAD,
9045                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9046                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9047                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9048                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9049                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9050                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9051                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9052                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9053                         I40E_INSET_FLEX_PAYLOAD,
9054                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9055                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9056                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9057                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9058                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9059                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9060                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9061                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9062                         I40E_INSET_FLEX_PAYLOAD,
9063                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9064                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9065                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9066                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9067                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9068                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9069                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9070                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9071                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9072                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9073                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9074                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9075                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9076                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9077                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9078                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9079                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9080                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9081                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9082                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9083                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9084                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9085                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9086                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9087                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9088                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9089                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9090                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9091                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9092                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9093                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9094                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9095                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9096                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9097                         I40E_INSET_FLEX_PAYLOAD,
9098                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9099                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9100                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9101                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9102                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9103                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9104                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9105                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9106                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9107                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9108                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9109                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9110                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9111                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9112                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9113                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9114                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9115                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9116                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9117                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9118                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9119                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9120                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9121                         I40E_INSET_DST_PORT | /* no TCP flags for UDP */
9122                         I40E_INSET_FLEX_PAYLOAD,
9123                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9124                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9125                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9126                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9127                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9128                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9129                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9130                         I40E_INSET_DST_PORT | /* no TCP flags for UDP */
9131                         I40E_INSET_FLEX_PAYLOAD,
9132                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9133                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9134                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9135                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9136                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9137                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9138                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9139                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9140                         I40E_INSET_FLEX_PAYLOAD,
9141                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9142                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9143                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9144                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9145                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9146                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9147                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9148                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9149                         I40E_INSET_FLEX_PAYLOAD,
9150                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9151                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9152                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9153                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9154                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9155                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9156                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9157                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9158                         I40E_INSET_FLEX_PAYLOAD,
9159                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9160                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9161                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9162                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9163                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9164                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9165                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9166                         I40E_INSET_FLEX_PAYLOAD,
9167                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9168                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9169                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9170                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9171                         I40E_INSET_FLEX_PAYLOAD,
9172         };
9173
9174         /**
9175          * Flow director supports only fields defined in
9176          * union rte_eth_fdir_flow.
9177          */
9178         static const uint64_t valid_fdir_inset_table[] = {
9179                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9180                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9181                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9182                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9183                 I40E_INSET_IPV4_TTL,
9184                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9185                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9186                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9187                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9188                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9189                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9190                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9191                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9192                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9193                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9194                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9195                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9196                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9197                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9198                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9199                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9200                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9201                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9202                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9203                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9204                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9205                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9206                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9207                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9208                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9209                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9210                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9211                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9212                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9213                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9214                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9215                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9216                 I40E_INSET_SCTP_VT,
9217                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9218                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9219                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9220                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9221                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9222                 I40E_INSET_IPV4_TTL,
9223                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9224                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9225                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9226                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9227                 I40E_INSET_IPV6_HOP_LIMIT,
9228                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9229                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9230                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9231                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9232                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9233                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9234                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9235                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9236                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9237                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9238                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9239                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9240                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9241                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9242                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9243                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9244                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9245                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9246                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9247                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9248                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9249                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9250                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9251                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9252                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9253                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9254                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9255                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9256                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9257                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9258                 I40E_INSET_SCTP_VT,
9259                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9260                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9261                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9262                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9263                 I40E_INSET_IPV6_HOP_LIMIT,
9264                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9265                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9266                 I40E_INSET_LAST_ETHER_TYPE,
9267         };
9268
9269         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9270                 return 0;
9271         if (filter == RTE_ETH_FILTER_HASH)
9272                 valid = valid_hash_inset_table[pctype];
9273         else
9274                 valid = valid_fdir_inset_table[pctype];
9275
9276         return valid;
9277 }
9278
9279 /**
9280  * Validate if the input set is allowed for a specific PCTYPE
9281  */
9282 int
9283 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9284                 enum rte_filter_type filter, uint64_t inset)
9285 {
9286         uint64_t valid;
9287
9288         valid = i40e_get_valid_input_set(pctype, filter);
9289         if (inset & (~valid))
9290                 return -EINVAL;
9291
9292         return 0;
9293 }
9294
9295 /* default input set fields combination per pctype */
9296 uint64_t
9297 i40e_get_default_input_set(uint16_t pctype)
9298 {
9299         static const uint64_t default_inset_table[] = {
9300                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9301                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9302                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9303                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9304                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9305                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9306                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9307                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9308                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9309                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9310                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9311                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9312                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9313                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9314                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9315                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9316                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9317                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9318                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9319                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9320                         I40E_INSET_SCTP_VT,
9321                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9322                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9323                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9324                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9325                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9326                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9327                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9328                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9329                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9330                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9331                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9332                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9333                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9334                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9335                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9336                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9337                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9338                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9339                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9340                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9341                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9342                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9343                         I40E_INSET_SCTP_VT,
9344                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9345                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9346                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9347                         I40E_INSET_LAST_ETHER_TYPE,
9348         };
9349
9350         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9351                 return 0;
9352
9353         return default_inset_table[pctype];
9354 }
9355
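/*
 * Illustrative sketch only, not driver code: how the two helpers above are
 * meant to be combined by a caller. The pctype and the extra TOS bit are
 * arbitrary examples; whether the addition is accepted depends on the
 * valid_*_inset_table contents for that pctype.
 *
 *	uint64_t inset;
 *
 *	inset = i40e_get_default_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 *	inset |= I40E_INSET_IPV4_TOS;	// request one extra field
 *	if (i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
 *				    RTE_ETH_FILTER_HASH, inset) < 0)
 *		;	// combination rejected for this pctype
 */
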
9356 /**
9357  * Translate the input set from generic bit masks to register-aware
9358  * bit masks
9359  */
9360 uint64_t
9361 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9362 {
9363         uint64_t val = 0;
9364         uint16_t i;
9365
9366         struct inset_map {
9367                 uint64_t inset;
9368                 uint64_t inset_reg;
9369         };
9370
9371         static const struct inset_map inset_map_common[] = {
9372                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9373                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9374                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9375                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9376                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9377                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9378                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9379                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9380                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9381                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9382                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9383                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9384                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9385                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9386                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9387                 {I40E_INSET_TUNNEL_DMAC,
9388                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9389                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9390                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9391                 {I40E_INSET_TUNNEL_SRC_PORT,
9392                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9393                 {I40E_INSET_TUNNEL_DST_PORT,
9394                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9395                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9396                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9397                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9398                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9399                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9400                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9401                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9402                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9403                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9404         };
9405
9406         /* Some registers are mapped differently on X722 */
9407         static const struct inset_map inset_map_diff_x722[] = {
9408                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9409                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9410                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9411                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9412         };
9413
9414         static const struct inset_map inset_map_diff_not_x722[] = {
9415                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9416                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9417                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9418                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9419         };
9420
9421         if (input == 0)
9422                 return val;
9423
9424         /* Translate input set to register aware inset */
9425         if (type == I40E_MAC_X722) {
9426                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9427                         if (input & inset_map_diff_x722[i].inset)
9428                                 val |= inset_map_diff_x722[i].inset_reg;
9429                 }
9430         } else {
9431                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9432                         if (input & inset_map_diff_not_x722[i].inset)
9433                                 val |= inset_map_diff_not_x722[i].inset_reg;
9434                 }
9435         }
9436
9437         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9438                 if (input & inset_map_common[i].inset)
9439                         val |= inset_map_common[i].inset_reg;
9440         }
9441
9442         return val;
9443 }
9444
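/*
 * The translated value spans two 32-bit hardware registers. A hedged sketch
 * of how the 64-bit result is split at the call sites later in this file
 * (the pctype is only an example):
 *
 *	uint64_t inset_reg;
 *
 *	inset_reg = i40e_translate_input_set_reg(hw->mac.type, input_set);
 *	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
 *			     (uint32_t)(inset_reg & UINT32_MAX));
 *	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
 *			     (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) &
 *			     UINT32_MAX));
 */
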
9445 static int
9446 i40e_get_inset_field_offset(struct i40e_hw *hw, uint32_t pit_reg_start,
9447                             uint32_t pit_reg_count, uint32_t hdr_off)
9448 {
9449         const uint32_t pit_reg_end = pit_reg_start + pit_reg_count;
9450         uint32_t field_off = I40E_FDIR_FIELD_OFFSET(hdr_off);
9451         uint32_t i, reg_val, src_off, count;
9452
9453         for (i = pit_reg_start; i < pit_reg_end; i++) {
9454                 reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_PIT(i));
9455
9456                 src_off = I40E_GLQF_PIT_SOURCE_OFF_GET(reg_val);
9457                 count = I40E_GLQF_PIT_FSIZE_GET(reg_val);
9458
9459                 if (src_off <= field_off && (src_off + count) > field_off)
9460                         break;
9461         }
9462
9463         if (i >= pit_reg_end) {
9464                 PMD_DRV_LOG(ERR,
9465                             "Hardware GLQF_PIT configuration does not support this field mask");
9466                 return -1;
9467         }
9468
9469         return I40E_GLQF_PIT_DEST_OFF_GET(reg_val) + field_off - src_off;
9470 }
9471
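/*
 * Worked example for the search above, using made-up register contents:
 * if one GLQF_PIT entry reports src_off = 8 with count = 4 and the
 * requested field maps to field_off = 10, then src_off <= 10 < src_off +
 * count, so the loop stops at that entry and the function returns
 * dest_off + (10 - 8), i.e. the field position inside that entry's
 * destination window. When no entry covers field_off, -1 is returned and
 * the caller must treat the mask as unsupported by the GLQF_PIT layout.
 */
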
9472 int
9473 i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset,
9474                              uint32_t *mask, uint8_t nb_elem)
9475 {
9476         static const uint64_t mask_inset[] = {
9477                 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL,
9478                 I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT };
9479
9480         static const struct {
9481                 uint64_t inset;
9482                 uint32_t mask;
9483                 uint32_t offset;
9484         } inset_mask_offset_map[] = {
9485                 { I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK,
9486                   offsetof(struct rte_ipv4_hdr, type_of_service) },
9487
9488                 { I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK,
9489                   offsetof(struct rte_ipv4_hdr, next_proto_id) },
9490
9491                 { I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK,
9492                   offsetof(struct rte_ipv4_hdr, time_to_live) },
9493
9494                 { I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK,
9495                   offsetof(struct rte_ipv6_hdr, vtc_flow) },
9496
9497                 { I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK,
9498                   offsetof(struct rte_ipv6_hdr, proto) },
9499
9500                 { I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK,
9501                   offsetof(struct rte_ipv6_hdr, hop_limits) },
9502         };
9503
9504         uint32_t i;
9505         int idx = 0;
9506
9507         assert(mask);
9508         if (!inset)
9509                 return 0;
9510
9511         for (i = 0; i < RTE_DIM(mask_inset); i++) {
9512                 /* Clear the inset bits when no mask register is
9513                  * required, for example when proto + ttl are both set
9514                  */
9515                 if ((mask_inset[i] & inset) == mask_inset[i]) {
9516                         inset &= ~mask_inset[i];
9517                         if (!inset)
9518                                 return 0;
9519                 }
9520         }
9521
9522         for (i = 0; i < RTE_DIM(inset_mask_offset_map); i++) {
9523                 uint32_t pit_start, pit_count;
9524                 int offset;
9525
9526                 if (!(inset_mask_offset_map[i].inset & inset))
9527                         continue;
9528
9529                 if (inset_mask_offset_map[i].inset &
9530                     (I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9531                      I40E_INSET_IPV4_TTL)) {
9532                         pit_start = I40E_GLQF_PIT_IPV4_START;
9533                         pit_count = I40E_GLQF_PIT_IPV4_COUNT;
9534                 } else {
9535                         pit_start = I40E_GLQF_PIT_IPV6_START;
9536                         pit_count = I40E_GLQF_PIT_IPV6_COUNT;
9537                 }
9538
9539                 offset = i40e_get_inset_field_offset(hw, pit_start, pit_count,
9540                                 inset_mask_offset_map[i].offset);
9541
9542                 if (offset < 0)
9543                         return -EINVAL;
9544
9545                 if (idx >= nb_elem) {
9546                         PMD_DRV_LOG(ERR,
9547                                     "Configuration of inset mask out of range %u",
9548                                     nb_elem);
9549                         return -ERANGE;
9550                 }
9551
9552                 mask[idx] = I40E_GLQF_PIT_BUILD((uint32_t)offset,
9553                                                 inset_mask_offset_map[i].mask);
9554                 idx++;
9555         }
9556
9557         return idx;
9558 }
9559
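/*
 * Hedged usage sketch, mirroring the call sites later in this file: the
 * function fills at most nb_elem GLQF mask values and returns how many it
 * produced, so callers size the array with I40E_INSET_MASK_NUM_REG and
 * program exactly that many registers:
 *
 *	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
 *	int num;
 *
 *	num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
 *					   I40E_INSET_MASK_NUM_REG);
 *	if (num < 0)
 *		;	// -EINVAL or -ERANGE, nothing may be programmed
 */
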
9560 void
9561 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9562 {
9563         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9564
9565         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9566         if (reg != val)
9567                 i40e_write_rx_ctl(hw, addr, val);
9568         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9569                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9570 }
9571
9572 void
9573 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9574 {
9575         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9576         struct rte_eth_dev *dev;
9577
9578         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
9579         if (reg != val) {
9580                 i40e_write_rx_ctl(hw, addr, val);
9581                 PMD_DRV_LOG(WARNING,
9582                             "i40e device %s changed global register [0x%08x]."
9583                             " original: 0x%08x, new: 0x%08x",
9584                             dev->device->name, addr, reg,
9585                             (uint32_t)i40e_read_rx_ctl(hw, addr));
9586         }
9587 }
9588
9589 static void
9590 i40e_filter_input_set_init(struct i40e_pf *pf)
9591 {
9592         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9593         enum i40e_filter_pctype pctype;
9594         uint64_t input_set, inset_reg;
9595         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9596         int num, i;
9597         uint16_t flow_type;
9598
9599         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9600              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9601                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9602
9603                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9604                         continue;
9605
9606                 input_set = i40e_get_default_input_set(pctype);
9607
9608                 num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9609                                                    I40E_INSET_MASK_NUM_REG);
9610                 if (num < 0)
9611                         return;
9612                 if (pf->support_multi_driver && num > 0) {
9613                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9614                         return;
9615                 }
9616                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9617                                         input_set);
9618
9619                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9620                                       (uint32_t)(inset_reg & UINT32_MAX));
9621                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9622                                      (uint32_t)((inset_reg >>
9623                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
9624                 if (!pf->support_multi_driver) {
9625                         i40e_check_write_global_reg(hw,
9626                                             I40E_GLQF_HASH_INSET(0, pctype),
9627                                             (uint32_t)(inset_reg & UINT32_MAX));
9628                         i40e_check_write_global_reg(hw,
9629                                              I40E_GLQF_HASH_INSET(1, pctype),
9630                                              (uint32_t)((inset_reg >>
9631                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
9632
9633                         for (i = 0; i < num; i++) {
9634                                 i40e_check_write_global_reg(hw,
9635                                                     I40E_GLQF_FD_MSK(i, pctype),
9636                                                     mask_reg[i]);
9637                                 i40e_check_write_global_reg(hw,
9638                                                   I40E_GLQF_HASH_MSK(i, pctype),
9639                                                   mask_reg[i]);
9640                         }
9641                         /* clear unused mask registers of the pctype */
9642                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9643                                 i40e_check_write_global_reg(hw,
9644                                                     I40E_GLQF_FD_MSK(i, pctype),
9645                                                     0);
9646                                 i40e_check_write_global_reg(hw,
9647                                                   I40E_GLQF_HASH_MSK(i, pctype),
9648                                                   0);
9649                         }
9650                 } else {
9651                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9652                 }
9653                 I40E_WRITE_FLUSH(hw);
9654
9655                 /* store the default input set */
9656                 if (!pf->support_multi_driver)
9657                         pf->hash_input_set[pctype] = input_set;
9658                 pf->fdir.input_set[pctype] = input_set;
9659         }
9660 }
9661
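/*
 * Summary of what the loop above programs per pctype, all derived from one
 * 64-bit input_set: the flow-director inset as a pair of 32-bit
 * PRTQF_FD_INSET halves and, unless multi-driver support is enabled, the
 * matching GLQF_HASH_INSET pair plus up to I40E_INSET_MASK_NUM_REG
 * GLQF_FD_MSK/GLQF_HASH_MSK field masks. Unused mask slots are cleared so
 * stale masks from an earlier configuration cannot leak into the new one.
 */
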
9662 int
9663 i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
9664                     uint32_t pctype, bool add)
9665 {
9666         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9667         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9668         uint64_t inset_reg = 0;
9669         int num, i;
9670
9671         if (pf->support_multi_driver) {
9672                 PMD_DRV_LOG(ERR,
9673                             "Modify input set is not permitted when multi-driver enabled.");
9674                 return -EPERM;
9675         }
9676
9677         /* For X722, get translated pctype in fd pctype register */
9678         if (hw->mac.type == I40E_MAC_X722)
9679                 pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
9680
9681         if (add) {
9682                 /* get inset value in register */
9683                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9684                 inset_reg <<= I40E_32_BIT_WIDTH;
9685                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9686                 input_set |= pf->hash_input_set[pctype];
9687         }
9688         num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9689                                            I40E_INSET_MASK_NUM_REG);
9690         if (num < 0)
9691                 return -EINVAL;
9692
9693         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9694
9695         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9696                                     (uint32_t)(inset_reg & UINT32_MAX));
9697         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9698                                     (uint32_t)((inset_reg >>
9699                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
9700
9701         for (i = 0; i < num; i++)
9702                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9703                                             mask_reg[i]);
9704         /* clear unused mask registers of the pctype */
9705         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9706                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9707                                             0);
9708         I40E_WRITE_FLUSH(hw);
9709
9710         pf->hash_input_set[pctype] = input_set;
9711         return 0;
9712 }
9713
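/*
 * Illustrative call only: with add == true the function ORs input_set into
 * whatever is already programmed (read back from GLQF_HASH_INSET), while
 * add == false overwrites the inset outright. A minimal sketch that extends
 * the IPv4/TCP hash inset with the TOS byte:
 *
 *	ret = i40e_set_hash_inset(hw, I40E_INSET_IPV4_TOS,
 *				  I40E_FILTER_PCTYPE_NONF_IPV4_TCP, true);
 */
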
9714 /* Convert ethertype filter structure */
9715 static int
9716 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9717                               struct i40e_ethertype_filter *filter)
9718 {
9719         rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
9720                 RTE_ETHER_ADDR_LEN);
9721         filter->input.ether_type = input->ether_type;
9722         filter->flags = input->flags;
9723         filter->queue = input->queue;
9724
9725         return 0;
9726 }
9727
9728 /* Check if the ethertype filter already exists */
9729 struct i40e_ethertype_filter *
9730 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9731                                 const struct i40e_ethertype_filter_input *input)
9732 {
9733         int ret;
9734
9735         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9736         if (ret < 0)
9737                 return NULL;
9738
9739         return ethertype_rule->hash_map[ret];
9740 }
9741
9742 /* Add ethertype filter in SW list */
9743 static int
9744 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9745                                 struct i40e_ethertype_filter *filter)
9746 {
9747         struct i40e_ethertype_rule *rule = &pf->ethertype;
9748         int ret;
9749
9750         ret = rte_hash_add_key(rule->hash_table, &filter->input);
9751         if (ret < 0) {
9752                 PMD_DRV_LOG(ERR,
9753                             "Failed to insert ethertype filter"
9754                             " into hash table, error %d!",
9755                             ret);
9756                 return ret;
9757         }
9758         rule->hash_map[ret] = filter;
9759
9760         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9761
9762         return 0;
9763 }
9764
9765 /* Delete ethertype filter in SW list */
9766 int
9767 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9768                              struct i40e_ethertype_filter_input *input)
9769 {
9770         struct i40e_ethertype_rule *rule = &pf->ethertype;
9771         struct i40e_ethertype_filter *filter;
9772         int ret;
9773
9774         ret = rte_hash_del_key(rule->hash_table, input);
9775         if (ret < 0) {
9776                 PMD_DRV_LOG(ERR,
9777                             "Failed to delete ethertype filter"
9778                             " from hash table, error %d!",
9779                             ret);
9780                 return ret;
9781         }
9782         filter = rule->hash_map[ret];
9783         rule->hash_map[ret] = NULL;
9784
9785         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9786         rte_free(filter);
9787
9788         return 0;
9789 }
9790
9791 /*
9792  * Configure an ethertype filter, which can direct packets by filtering
9793  * on MAC address and ether_type together, or on ether_type alone
9794  */
9795 int
9796 i40e_ethertype_filter_set(struct i40e_pf *pf,
9797                         struct rte_eth_ethertype_filter *filter,
9798                         bool add)
9799 {
9800         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9801         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9802         struct i40e_ethertype_filter *ethertype_filter, *node;
9803         struct i40e_ethertype_filter check_filter;
9804         struct i40e_control_filter_stats stats;
9805         uint16_t flags = 0;
9806         int ret;
9807
9808         if (filter->queue >= pf->dev_data->nb_rx_queues) {
9809                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9810                 return -EINVAL;
9811         }
9812         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
9813                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
9814                 PMD_DRV_LOG(ERR,
9815                         "unsupported ether_type(0x%04x) in control packet filter.",
9816                         filter->ether_type);
9817                 return -EINVAL;
9818         }
9819         if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
9820                 PMD_DRV_LOG(WARNING,
9821                         "filter vlan ether_type in first tag is not supported.");
9822
9823         /* Check if the filter already exists in the SW list */
9824         memset(&check_filter, 0, sizeof(check_filter));
9825         i40e_ethertype_filter_convert(filter, &check_filter);
9826         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9827                                                &check_filter.input);
9828         if (add && node) {
9829                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9830                 return -EINVAL;
9831         }
9832
9833         if (!add && !node) {
9834                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9835                 return -EINVAL;
9836         }
9837
9838         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9839                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9840         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9841                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9842         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9843
9844         memset(&stats, 0, sizeof(stats));
9845         ret = i40e_aq_add_rem_control_packet_filter(hw,
9846                         filter->mac_addr.addr_bytes,
9847                         filter->ether_type, flags,
9848                         pf->main_vsi->seid,
9849                         filter->queue, add, &stats, NULL);
9850
9851         PMD_DRV_LOG(INFO,
9852                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9853                 ret, stats.mac_etype_used, stats.etype_used,
9854                 stats.mac_etype_free, stats.etype_free);
9855         if (ret < 0)
9856                 return -ENOSYS;
9857
9858         /* Add or delete a filter in SW list */
9859         if (add) {
9860                 ethertype_filter = rte_zmalloc("ethertype_filter",
9861                                        sizeof(*ethertype_filter), 0);
9862                 if (ethertype_filter == NULL) {
9863                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9864                         return -ENOMEM;
9865                 }
9866
9867                 rte_memcpy(ethertype_filter, &check_filter,
9868                            sizeof(check_filter));
9869                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9870                 if (ret < 0)
9871                         rte_free(ethertype_filter);
9872         } else {
9873                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9874         }
9875
9876         return ret;
9877 }
9878
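/*
 * A hedged example of driving the function above; the values are
 * placeholders. This would steer LLDP frames (ether type 0x88CC) to RX
 * queue 0, matching on ether type only because RTE_ETHTYPE_FLAGS_MAC is
 * left unset:
 *
 *	struct rte_eth_ethertype_filter f = {
 *		.ether_type = 0x88CC,
 *		.flags = 0,		// no MAC match, no drop
 *		.queue = 0,
 *	};
 *
 *	ret = i40e_ethertype_filter_set(pf, &f, true);
 */
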
9879 static int
9880 i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
9881                       const struct rte_flow_ops **ops)
9882 {
9883         if (dev == NULL)
9884                 return -EINVAL;
9885
9886         *ops = &i40e_flow_ops;
9887         return 0;
9888 }
9889
9890 /*
9891  * Check and enable Extended Tag.
9892  * Enabling Extended Tag is important for 40G performance.
9893  */
9894 static void
9895 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9896 {
9897         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9898         uint32_t buf = 0;
9899         int ret;
9900
9901         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9902                                       PCI_DEV_CAP_REG);
9903         if (ret < 0) {
9904                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9905                             PCI_DEV_CAP_REG);
9906                 return;
9907         }
9908         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9909                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9910                 return;
9911         }
9912
9913         buf = 0;
9914         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9915                                       PCI_DEV_CTRL_REG);
9916         if (ret < 0) {
9917                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9918                             PCI_DEV_CTRL_REG);
9919                 return;
9920         }
9921         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9922                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9923                 return;
9924         }
9925         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9926         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9927                                        PCI_DEV_CTRL_REG);
9928         if (ret < 0) {
9929                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9930                             PCI_DEV_CTRL_REG);
9931                 return;
9932         }
9933 }
9934
9935 /*
9936  * Some registers are only cleared by a global hardware reset, so
9937  * explicit initialization is needed here to put them into an
9938  * expected initial state.
9939  */
9940 static void
9941 i40e_hw_init(struct rte_eth_dev *dev)
9942 {
9943         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9944
9945         i40e_enable_extended_tag(dev);
9946
9947         /* clear the PF Queue Filter control register */
9948         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9949
9950         /* Disable symmetric hash per port */
9951         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9952 }
9953
9954 /*
9955  * For X722 it is possible to have multiple pctypes mapped to the same
9956  * flowtype; however, this function returns only the highest such pctype
9957  * index, which is not quite correct. This is a known problem of the
9958  * i40e driver and needs to be fixed later.
9959  */
9960 enum i40e_filter_pctype
9961 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9962 {
9963         int i;
9964         uint64_t pctype_mask;
9965
9966         if (flow_type < I40E_FLOW_TYPE_MAX) {
9967                 pctype_mask = adapter->pctypes_tbl[flow_type];
9968                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9969                         if (pctype_mask & (1ULL << i))
9970                                 return (enum i40e_filter_pctype)i;
9971                 }
9972         }
9973         return I40E_FILTER_PCTYPE_INVALID;
9974 }
9975
9976 uint16_t
9977 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9978                         enum i40e_filter_pctype pctype)
9979 {
9980         uint16_t flowtype;
9981         uint64_t pctype_mask = 1ULL << pctype;
9982
9983         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9984              flowtype++) {
9985                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9986                         return flowtype;
9987         }
9988
9989         return RTE_ETH_FLOW_UNKNOWN;
9990 }
9991
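/*
 * Note on the pair above: the mapping is not a strict bijection. Several
 * pctypes can share one flowtype (notably on X722), so converting a
 * flowtype to a pctype and back normally returns the original flowtype,
 * while converting a pctype to a flowtype and back may land on a different
 * (the highest) pctype of the same group.
 */
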
9992 /*
9993  * On X710, performance falls far short of expectations on recent firmware
9994  * versions; the same holds on XL710 if promiscuous mode is disabled, or
9995  * if promiscuous mode is enabled and the port MAC address equals the
9996  * packet destination MAC address. A fix may not be integrated in the next
9997  * firmware version, so a workaround in the software driver is needed. It
9998  * modifies the initial values of 3 internal-only registers on both X710
9999  * and XL710. Note that the values for X710 and XL710 may differ, and the
10000  * workaround can be removed once the issue is fixed in firmware.
10002  */
10003
10004 /* For both X710 and XL710 */
10005 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
10006 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
10007 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
10008
10009 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10010 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
10011
10012 /* For X722 */
10013 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10014 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10015
10016 /* For X710 */
10017 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
10018 /* For XL710 */
10019 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
10020 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10021
10022 /*
10023  * GL_SWR_PM_UP_THR:
10024  * The value is not affected by link speed; it is set according to the
10025  * total number of ports for a better pipe-monitor configuration.
10026  */
10027 static bool
10028 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10029 {
10030 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10031                 .device_id = (dev),   \
10032                 .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10033
10034 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10035                 .device_id = (dev),   \
10036                 .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10037
10038         static const struct {
10039                 uint16_t device_id;
10040                 uint32_t val;
10041         } swr_pm_table[] = {
10042                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10043                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10044                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10045                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10046                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
10047
10048                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10049                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10050                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10051                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10052                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10053                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10054                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10055         };
10056         uint32_t i;
10057
10058         if (value == NULL) {
10059                 PMD_DRV_LOG(ERR, "value is NULL");
10060                 return false;
10061         }
10062
10063         for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10064                 if (hw->device_id == swr_pm_table[i].device_id) {
10065                         *value = swr_pm_table[i].val;
10066
10067                         PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10068                                     "value - 0x%08x",
10069                                     hw->device_id, *value);
10070                         return true;
10071                 }
10072         }
10073
10074         return false;
10075 }
10076
10077 static int
10078 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10079 {
10080         enum i40e_status_code status;
10081         struct i40e_aq_get_phy_abilities_resp phy_ab;
10082         int ret = -ENOTSUP;
10083         int retries = 0;
10084
10085         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10086                                               NULL);
10087
10088         while (status) {
10089                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10090                         status);
10091                 retries++;
10092                 rte_delay_us(100000);
10093                 if (retries < 5)
10094                         status = i40e_aq_get_phy_capabilities(hw, false,
10095                                         true, &phy_ab, NULL);
10096                 else
10097                         return ret;
10098         }
10099         return 0;
10100 }
10101
10102 static void
10103 i40e_configure_registers(struct i40e_hw *hw)
10104 {
10105         static struct {
10106                 uint32_t addr;
10107                 uint64_t val;
10108         } reg_table[] = {
10109                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10110                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10111                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10112         };
10113         uint64_t reg;
10114         uint32_t i;
10115         int ret;
10116
10117         for (i = 0; i < RTE_DIM(reg_table); i++) {
10118                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10119                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10120                                 reg_table[i].val =
10121                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10122                         else /* For X710/XL710/XXV710 */
10123                                 if (hw->aq.fw_maj_ver < 6)
10124                                         reg_table[i].val =
10125                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10126                                 else
10127                                         reg_table[i].val =
10128                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10129                 }
10130
10131                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10132                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10133                                 reg_table[i].val =
10134                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10135                         else /* For X710/XL710/XXV710 */
10136                                 reg_table[i].val =
10137                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10138                 }
10139
10140                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10141                         uint32_t cfg_val;
10142
10143                         if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10144                                 PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10145                                             "GL_SWR_PM_UP_THR value fixup",
10146                                             hw->device_id);
10147                                 continue;
10148                         }
10149
10150                         reg_table[i].val = cfg_val;
10151                 }
10152
10153                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10154                                                         &reg, NULL);
10155                 if (ret < 0) {
10156                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10157                                                         reg_table[i].addr);
10158                         break;
10159                 }
10160                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10161                                                 reg_table[i].addr, reg);
10162                 if (reg == reg_table[i].val)
10163                         continue;
10164
10165                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10166                                                 reg_table[i].val, NULL);
10167                 if (ret < 0) {
10168                         PMD_DRV_LOG(ERR,
10169                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10170                                 reg_table[i].val, reg_table[i].addr);
10171                         break;
10172                 }
10173                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10174                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10175         }
10176 }
10177
10178 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10179 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10180 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10181 static int
10182 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10183 {
10184         uint32_t reg;
10185         int ret;
10186
10187         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10188                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10189                 return -EINVAL;
10190         }
10191
10192         /* Configure for double VLAN RX stripping */
10193         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10194         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10195                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10196                 ret = i40e_aq_debug_write_register(hw,
10197                                                    I40E_VSI_TSR(vsi->vsi_id),
10198                                                    reg, NULL);
10199                 if (ret < 0) {
10200                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10201                                     vsi->vsi_id);
10202                         return I40E_ERR_CONFIG;
10203                 }
10204         }
10205
10206         /* Configure for double VLAN TX insertion */
10207         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10208         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10209                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10210                 ret = i40e_aq_debug_write_register(hw,
10211                                                    I40E_VSI_L2TAGSTXVALID(
10212                                                    vsi->vsi_id), reg, NULL);
10213                 if (ret < 0) {
10214                         PMD_DRV_LOG(ERR,
10215                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
10216                                 vsi->vsi_id);
10217                         return I40E_ERR_CONFIG;
10218                 }
10219         }
10220
10221         return 0;
10222 }
10223
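/*
 * Both updates above are read-modify-write cycles: read the current value
 * with I40E_READ_REG, check whether the QinQ configuration is already in
 * place, and issue the admin-queue debug register write only when it is
 * not, so an already-configured VSI is left untouched.
 */
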
10224 /**
10225  * i40e_aq_add_mirror_rule
10226  * @hw: pointer to the hardware structure
10227  * @seid: VEB seid to add mirror rule to
10228  * @dst_id: destination vsi seid
10229  * @entries: Buffer which contains the entities to be mirrored
10230  * @count: number of entities contained in the buffer
10231  * @rule_id: the rule_id of the rule to be added
10232  *
10233  * Add a mirror rule for a given VEB.
10234  *
10235  **/
10236 static enum i40e_status_code
10237 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10238                         uint16_t seid, uint16_t dst_id,
10239                         uint16_t rule_type, uint16_t *entries,
10240                         uint16_t count, uint16_t *rule_id)
10241 {
10242         struct i40e_aq_desc desc;
10243         struct i40e_aqc_add_delete_mirror_rule cmd;
10244         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10245                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
10246                 &desc.params.raw;
10247         uint16_t buff_len;
10248         enum i40e_status_code status;
10249
10250         i40e_fill_default_direct_cmd_desc(&desc,
10251                                           i40e_aqc_opc_add_mirror_rule);
10252         memset(&cmd, 0, sizeof(cmd));
10253
10254         buff_len = sizeof(uint16_t) * count;
10255         desc.datalen = rte_cpu_to_le_16(buff_len);
10256         if (buff_len > 0)
10257                 desc.flags |= rte_cpu_to_le_16(
10258                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10259         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10260                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10261         cmd.num_entries = rte_cpu_to_le_16(count);
10262         cmd.seid = rte_cpu_to_le_16(seid);
10263         cmd.destination = rte_cpu_to_le_16(dst_id);
10264
10265         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10266         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
10267         PMD_DRV_LOG(INFO,
10268                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
10269                 hw->aq.asq_last_status, resp->rule_id,
10270                 resp->mirror_rules_used, resp->mirror_rules_free);
10271         *rule_id = rte_le_to_cpu_16(resp->rule_id);
10272
10273         return status;
10274 }
10275
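/*
 * Sketch of a caller-side buffer for the command above; the values are
 * examples. For I40E_AQC_MIRROR_RULE_TYPE_VLAN the entries array carries
 * VLAN IDs (for the virtual-port rule types it typically carries VSI
 * seids), and count must match the number of valid elements:
 *
 *	uint16_t vlans[2] = { 100, 200 };	// mirror VLANs 100 and 200
 *	uint16_t rule_id;
 *
 *	status = i40e_aq_add_mirror_rule(hw, veb_seid, dst_vsi_seid,
 *					 I40E_AQC_MIRROR_RULE_TYPE_VLAN,
 *					 vlans, 2, &rule_id);
 */
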
10276 /**
10277  * i40e_aq_del_mirror_rule
10278  * @hw: pointer to the hardware structure
10279  * @seid: VEB seid to add mirror rule to
10280  * @entries: Buffer which contains the entities to be mirrored
10281  * @count: number of entities contained in the buffer
10282  * @rule_id: the rule_id of the rule to be deleted
10283  *
10284  * Delete a mirror rule for a given veb.
10285  *
10286  **/
10287 static enum i40e_status_code
10288 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10289                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
10290                 uint16_t count, uint16_t rule_id)
10291 {
10292         struct i40e_aq_desc desc;
10293         struct i40e_aqc_add_delete_mirror_rule cmd;
10294         uint16_t buff_len = 0;
10295         enum i40e_status_code status;
10296         void *buff = NULL;
10297
10298         i40e_fill_default_direct_cmd_desc(&desc,
10299                                           i40e_aqc_opc_delete_mirror_rule);
10300         memset(&cmd, 0, sizeof(cmd));
10301         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10302                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10303                                                           I40E_AQ_FLAG_RD));
10304                 cmd.num_entries = rte_cpu_to_le_16(count);
10305                 buff_len = sizeof(uint16_t) * count;
10306                 desc.datalen = rte_cpu_to_le_16(buff_len);
10307                 buff = (void *)entries;
10308         } else
10309                 /* rule id is filled in destination field for deleting mirror rule */
10310                 cmd.destination = rte_cpu_to_le_16(rule_id);
10311
10312         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10313                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10314         cmd.seid = rte_cpu_to_le_16(seid);
10315
10316         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10317         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10318
10319         return status;
10320 }
10321
10322 /**
10323  * i40e_mirror_rule_set
10324  * @dev: pointer to the device structure
10325  * @mirror_conf: mirror rule info
10326  * @sw_id: mirror rule's sw_id
10327  * @on: enable/disable
10328  *
10329  * set a mirror rule.
10330  *
10331  **/
10332 static int
10333 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10334                         struct rte_eth_mirror_conf *mirror_conf,
10335                         uint8_t sw_id, uint8_t on)
10336 {
10337         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10338         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10339         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10340         struct i40e_mirror_rule *parent = NULL;
10341         uint16_t seid, dst_seid, rule_id;
10342         uint16_t i, j = 0;
10343         int ret;
10344
10345         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10346
10347         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10348                 PMD_DRV_LOG(ERR,
10349                         "mirror rule cannot be configured without VEB or VFs.");
10350                 return -ENOSYS;
10351         }
10352         if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
10353                 PMD_DRV_LOG(ERR, "mirror table is full.");
10354                 return -ENOSPC;
10355         }
10356         if (mirror_conf->dst_pool > pf->vf_num) {
10357                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10358                                  mirror_conf->dst_pool);
10359                 return -EINVAL;
10360         }
10361
10362         seid = pf->main_vsi->veb->seid;
10363
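        /* The mirror list is kept sorted by sw_id in ascending order;
         * "parent" records the node after which a new rule would be
         * inserted further below.
         */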
10364         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10365                 if (sw_id <= it->index) {
10366                         mirr_rule = it;
10367                         break;
10368                 }
10369                 parent = it;
10370         }
10371         if (mirr_rule && sw_id == mirr_rule->index) {
10372                 if (on) {
10373                         PMD_DRV_LOG(ERR, "mirror rule exists.");
10374                         return -EEXIST;
10375                 } else {
10376                         ret = i40e_aq_del_mirror_rule(hw, seid,
10377                                         mirr_rule->rule_type,
10378                                         mirr_rule->entries,
10379                                         mirr_rule->num_entries, mirr_rule->id);
10380                         if (ret < 0) {
10381                                 PMD_DRV_LOG(ERR,
10382                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
10383                                         ret, hw->aq.asq_last_status);
10384                                 return -ENOSYS;
10385                         }
10386                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10387                         rte_free(mirr_rule);
10388                         pf->nb_mirror_rule--;
10389                         return 0;
10390                 }
10391         } else if (!on) {
10392                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10393                 return -ENOENT;
10394         }
10395
10396         mirr_rule = rte_zmalloc("i40e_mirror_rule",
10397                                 sizeof(struct i40e_mirror_rule), 0);
10398         if (!mirr_rule) {
10399                 PMD_DRV_LOG(ERR, "failed to allocate memory");
10400                 return -ENOMEM;
10401         }
10402         switch (mirror_conf->rule_type) {
10403         case ETH_MIRROR_VLAN:
10404                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10405                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10406                                 mirr_rule->entries[j] =
10407                                         mirror_conf->vlan.vlan_id[i];
10408                                 j++;
10409                         }
10410                 }
10411                 if (j == 0) {
10412                         PMD_DRV_LOG(ERR, "vlan is not specified.");
10413                         rte_free(mirr_rule);
10414                         return -EINVAL;
10415                 }
10416                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10417                 break;
10418         case ETH_MIRROR_VIRTUAL_POOL_UP:
10419         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10420                 /* check if the specified pool bit is out of range */
10421                 if (mirror_conf->pool_mask >= (uint64_t)(1ULL << (pf->vf_num + 1))) {
10422                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
10423                         rte_free(mirr_rule);
10424                         return -EINVAL;
10425                 }
10426                 for (i = 0, j = 0; i < pf->vf_num; i++) {
10427                         if (mirror_conf->pool_mask & (1ULL << i)) {
10428                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10429                                 j++;
10430                         }
10431                 }
10432                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10433                         /* add pf vsi to entries */
10434                         mirr_rule->entries[j] = pf->main_vsi_seid;
10435                         j++;
10436                 }
10437                 if (j == 0) {
10438                         PMD_DRV_LOG(ERR, "pool is not specified.");
10439                         rte_free(mirr_rule);
10440                         return -EINVAL;
10441                 }
10442                 /* in AQ commands, egress and ingress are relative to the switch, not the port */
10443                 mirr_rule->rule_type =
10444                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10445                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10446                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10447                 break;
10448         case ETH_MIRROR_UPLINK_PORT:
10449                 /* in AQ commands, egress and ingress are relative to the switch, not the port */
10450                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10451                 break;
10452         case ETH_MIRROR_DOWNLINK_PORT:
10453                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10454                 break;
10455         default:
10456                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10457                         mirror_conf->rule_type);
10458                 rte_free(mirr_rule);
10459                 return -EINVAL;
10460         }
10461
10462         /* If the dst_pool is equal to vf_num, consider it as PF */
10463         if (mirror_conf->dst_pool == pf->vf_num)
10464                 dst_seid = pf->main_vsi_seid;
10465         else
10466                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10467
10468         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10469                                       mirr_rule->rule_type, mirr_rule->entries,
10470                                       j, &rule_id);
10471         if (ret < 0) {
10472                 PMD_DRV_LOG(ERR,
10473                         "failed to add mirror rule: ret = %d, aq_err = %d.",
10474                         ret, hw->aq.asq_last_status);
10475                 rte_free(mirr_rule);
10476                 return -ENOSYS;
10477         }
10478
10479         mirr_rule->index = sw_id;
10480         mirr_rule->num_entries = j;
10481         mirr_rule->id = rule_id;
10482         mirr_rule->dst_vsi_seid = dst_seid;
10483
10484         if (parent)
10485                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10486         else
10487                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10488
10489         pf->nb_mirror_rule++;
10490         return 0;
10491 }
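/*
 * Illustrative call path (a sketch, not code from this driver): an
 * application reaches this callback through the generic ethdev API,
 * e.g. to mirror VLAN 100 into pool 0:
 *
 *	struct rte_eth_mirror_conf conf = {
 *		.rule_type = ETH_MIRROR_VLAN,
 *		.dst_pool = 0,
 *		.vlan = { .vlan_mask = 0x1, .vlan_id = { 100 } },
 *	};
 *	ret = rte_eth_mirror_rule_set(port_id, &conf, 1, 1);
 *
 * where the last two arguments are sw_id and on. Names follow the
 * rte_ethdev.h shipped with this tree; treat this as an example only.
 */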
10492
10493 /**
10494  * i40e_mirror_rule_reset
10495  * @dev: pointer to the device
10496  * @sw_id: mirror rule's sw_id
10497  *
10498  * reset a mirror rule.
10499  *
10500  **/
10501 static int
10502 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10503 {
10504         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10505         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10506         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10507         uint16_t seid;
10508         int ret;
10509
10510         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10511
10512         seid = pf->main_vsi->veb->seid;
10513
10514         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10515                 if (sw_id == it->index) {
10516                         mirr_rule = it;
10517                         break;
10518                 }
10519         }
10520         if (mirr_rule) {
10521                 ret = i40e_aq_del_mirror_rule(hw, seid,
10522                                 mirr_rule->rule_type,
10523                                 mirr_rule->entries,
10524                                 mirr_rule->num_entries, mirr_rule->id);
10525                 if (ret < 0) {
10526                         PMD_DRV_LOG(ERR,
10527                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
10528                                 ret, hw->aq.asq_last_status);
10529                         return -ENOSYS;
10530                 }
10531                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10532                 rte_free(mirr_rule);
10533                 pf->nb_mirror_rule--;
10534         } else {
10535                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10536                 return -ENOENT;
10537         }
10538         return 0;
10539 }
10540
10541 static uint64_t
10542 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10543 {
10544         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10545         uint64_t systim_cycles;
10546
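        /* The 64-bit PHC value is split across two 32-bit registers; the
         * low word is read first (the hardware is assumed to latch the
         * high word on the low-word read, as is usual for split counters).
         */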
10547         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10548         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10549                         << 32;
10550
10551         return systim_cycles;
10552 }
10553
10554 static uint64_t
10555 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10556 {
10557         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10558         uint64_t rx_tstamp;
10559
10560         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10561         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10562                         << 32;
10563
10564         return rx_tstamp;
10565 }
10566
10567 static uint64_t
10568 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10569 {
10570         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10571         uint64_t tx_tstamp;
10572
10573         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10574         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10575                         << 32;
10576
10577         return tx_tstamp;
10578 }
10579
10580 static void
10581 i40e_start_timecounters(struct rte_eth_dev *dev)
10582 {
10583         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10584         struct i40e_adapter *adapter = dev->data->dev_private;
10585         struct rte_eth_link link;
10586         uint32_t tsync_inc_l;
10587         uint32_t tsync_inc_h;
10588
10589         /* Get current link speed. */
10590         i40e_dev_link_update(dev, 1);
10591         rte_eth_linkstatus_get(dev, &link);
10592
10593         switch (link.link_speed) {
10594         case ETH_SPEED_NUM_40G:
10595         case ETH_SPEED_NUM_25G:
10596                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10597                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10598                 break;
10599         case ETH_SPEED_NUM_10G:
10600                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10601                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10602                 break;
10603         case ETH_SPEED_NUM_1G:
10604                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10605                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10606                 break;
10607         default:
10608                 tsync_inc_l = 0x0;
10609                 tsync_inc_h = 0x0;
10610         }
10611
10612         /* Set the timesync increment value. */
10613         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10614         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
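        /* With the speed-dependent increment programmed above, the counter
         * is assumed to advance in (roughly) nanosecond units, which is why
         * the timecounters below use shift 0 and no fractional nsec mask.
         */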
10615
10616         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10617         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10618         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10619
10620         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10621         adapter->systime_tc.cc_shift = 0;
10622         adapter->systime_tc.nsec_mask = 0;
10623
10624         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10625         adapter->rx_tstamp_tc.cc_shift = 0;
10626         adapter->rx_tstamp_tc.nsec_mask = 0;
10627
10628         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10629         adapter->tx_tstamp_tc.cc_shift = 0;
10630         adapter->tx_tstamp_tc.nsec_mask = 0;
10631 }
10632
10633 static int
10634 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10635 {
10636         struct i40e_adapter *adapter = dev->data->dev_private;
10637
10638         adapter->systime_tc.nsec += delta;
10639         adapter->rx_tstamp_tc.nsec += delta;
10640         adapter->tx_tstamp_tc.nsec += delta;
10641
10642         return 0;
10643 }
10644
10645 static int
10646 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10647 {
10648         uint64_t ns;
10649         struct i40e_adapter *adapter = dev->data->dev_private;
10650
10651         ns = rte_timespec_to_ns(ts);
10652
10653         /* Set the timecounters to a new value. */
10654         adapter->systime_tc.nsec = ns;
10655         adapter->rx_tstamp_tc.nsec = ns;
10656         adapter->tx_tstamp_tc.nsec = ns;
10657
10658         return 0;
10659 }
10660
10661 static int
10662 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10663 {
10664         uint64_t ns, systime_cycles;
10665         struct i40e_adapter *adapter = dev->data->dev_private;
10666
10667         systime_cycles = i40e_read_systime_cyclecounter(dev);
10668         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10669         *ts = rte_ns_to_timespec(ns);
10670
10671         return 0;
10672 }
10673
10674 static int
10675 i40e_timesync_enable(struct rte_eth_dev *dev)
10676 {
10677         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10678         uint32_t tsync_ctl_l;
10679         uint32_t tsync_ctl_h;
10680
10681         /* Stop the timesync system time. */
10682         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10683         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10684         /* Reset the timesync system time value. */
10685         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10686         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10687
10688         i40e_start_timecounters(dev);
10689
10690         /* Clear timesync registers. */
10691         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10692         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10693         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10694         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10695         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10696         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10697
10698         /* Enable timestamping of PTP packets. */
10699         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10700         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10701
10702         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10703         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10704         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10705
10706         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10707         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10708
10709         return 0;
10710 }
10711
10712 static int
10713 i40e_timesync_disable(struct rte_eth_dev *dev)
10714 {
10715         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10716         uint32_t tsync_ctl_l;
10717         uint32_t tsync_ctl_h;
10718
10719         /* Disable timestamping of transmitted PTP packets. */
10720         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10721         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10722
10723         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10724         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10725
10726         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10727         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10728
10729         /* Reset the timesync increment value. */
10730         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10731         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10732
10733         return 0;
10734 }
10735
10736 static int
10737 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10738                                 struct timespec *timestamp, uint32_t flags)
10739 {
10740         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10741         struct i40e_adapter *adapter = dev->data->dev_private;
10742         uint32_t sync_status;
10743         uint32_t index = flags & 0x03;
10744         uint64_t rx_tstamp_cycles;
10745         uint64_t ns;
10746
10747         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10748         if ((sync_status & (1 << index)) == 0)
10749                 return -EINVAL;
10750
10751         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10752         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10753         *timestamp = rte_ns_to_timespec(ns);
10754
10755         return 0;
10756 }
10757
10758 static int
10759 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10760                                 struct timespec *timestamp)
10761 {
10762         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10763         struct i40e_adapter *adapter = dev->data->dev_private;
10764         uint32_t sync_status;
10765         uint64_t tx_tstamp_cycles;
10766         uint64_t ns;
10767
10768         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10769         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10770                 return -EINVAL;
10771
10772         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10773         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10774         *timestamp = rte_ns_to_timespec(ns);
10775
10776         return 0;
10777 }
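/*
 * Typical PTP flow through the ethdev API (an illustrative sketch only):
 *
 *	rte_eth_timesync_enable(port_id);
 *	// on RX of a PTP packet whose mbuf carries PKT_RX_IEEE1588_TMST:
 *	rte_eth_timesync_read_rx_timestamp(port_id, &ts, idx);
 *	// after TX of a packet sent with PKT_TX_IEEE1588_TMST:
 *	rte_eth_timesync_read_tx_timestamp(port_id, &ts);
 *
 * Flag and function names follow rte_ethdev.h/rte_mbuf.h of this tree.
 */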
10778
10779 /*
10780  * i40e_parse_dcb_configure - parse dcb configure from user
10781  * @dev: the device being configured
10782  * @dcb_cfg: pointer of the result of parse
10783  * @tc_map: bit map of enabled traffic classes
10784  *
10785  * Returns 0 on success, negative value on failure
10786  */
10787 static int
10788 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10789                          struct i40e_dcbx_config *dcb_cfg,
10790                          uint8_t *tc_map)
10791 {
10792         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10793         uint8_t i, tc_bw, bw_lf;
10794
10795         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10796
10797         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10798         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10799                 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
10800                 return -EINVAL;
10801         }
10802
10803         /* assume each tc has the same bw */
10804         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10805         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10806                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10807         /* distribute the remainder so the sum of tcbw equals 100 */
10808         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10809         for (i = 0; i < bw_lf; i++)
10810                 dcb_cfg->etscfg.tcbwtable[i]++;
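        /* e.g. with nb_tcs == 3: tc_bw == 33 and bw_lf == 1, so the first
         * TC gets 34 and the table sums to exactly 100.
         */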
10811
10812         /* assume each tc has the same Transmission Selection Algorithm */
10813         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10814                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10815
10816         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10817                 dcb_cfg->etscfg.prioritytable[i] =
10818                                 dcb_rx_conf->dcb_tc[i];
10819
10820         /* FW needs one App to configure HW */
10821         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10822         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10823         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10824         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10825
10826         if (dcb_rx_conf->nb_tcs == 0)
10827                 *tc_map = 1; /* tc0 only */
10828         else
10829                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10830
10831         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10832                 dcb_cfg->pfc.willing = 0;
10833                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10834                 dcb_cfg->pfc.pfcenable = *tc_map;
10835         }
10836         return 0;
10837 }
10838
10839
10840 static enum i40e_status_code
10841 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10842                               struct i40e_aqc_vsi_properties_data *info,
10843                               uint8_t enabled_tcmap)
10844 {
10845         enum i40e_status_code ret;
10846         int i, total_tc = 0;
10847         uint16_t qpnum_per_tc, bsf, qp_idx;
10848         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10849         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10850         uint16_t used_queues;
10851
10852         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10853         if (ret != I40E_SUCCESS)
10854                 return ret;
10855
10856         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10857                 if (enabled_tcmap & (1 << i))
10858                         total_tc++;
10859         }
10860         if (total_tc == 0)
10861                 total_tc = 1;
10862         vsi->enabled_tc = enabled_tcmap;
10863
10864         /* different VSI types have different numbers of queues assigned */
10865         if (vsi->type == I40E_VSI_MAIN)
10866                 used_queues = dev_data->nb_rx_queues -
10867                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10868         else if (vsi->type == I40E_VSI_VMDQ2)
10869                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10870         else {
10871                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10872                 return I40E_ERR_NO_AVAILABLE_VSI;
10873         }
10874
10875         /* Number of queues per enabled TC */
10876         qpnum_per_tc = used_queues / total_tc;
10877         if (qpnum_per_tc == 0) {
10878                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
10879                 return I40E_ERR_INVALID_QP_ID;
10880         }
10881         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10882                                 I40E_MAX_Q_PER_TC);
10883         bsf = rte_bsf32(qpnum_per_tc);
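        /* qpnum_per_tc was floored to a power of two because the TC mapping
         * encodes the queue count as a power-of-two exponent; bsf is that
         * exponent.
         */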
10884
10885         /**
10886          * Configure TC and queue mapping parameters. For each enabled TC,
10887          * allocate qpnum_per_tc queues to its traffic; disabled TCs are
10888          * served by the default queue.
10889          */
10890         qp_idx = 0;
10891         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10892                 if (vsi->enabled_tc & (1 << i)) {
10893                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10894                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10895                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10896                         qp_idx += qpnum_per_tc;
10897                 } else
10898                         info->tc_mapping[i] = 0;
10899         }
10900
10901         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
10902         if (vsi->type == I40E_VSI_SRIOV) {
10903                 info->mapping_flags |=
10904                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10905                 for (i = 0; i < vsi->nb_qps; i++)
10906                         info->queue_mapping[i] =
10907                                 rte_cpu_to_le_16(vsi->base_queue + i);
10908         } else {
10909                 info->mapping_flags |=
10910                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10911                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10912         }
10913         info->valid_sections |=
10914                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10915
10916         return I40E_SUCCESS;
10917 }
10918
10919 /*
10920  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10921  * @veb: VEB to be configured
10922  * @tc_map: enabled TC bitmap
10923  *
10924  * Returns 0 on success, negative value on failure
10925  */
10926 static enum i40e_status_code
10927 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10928 {
10929         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10930         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10931         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10932         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10933         enum i40e_status_code ret = I40E_SUCCESS;
10934         int i;
10935         uint32_t bw_max;
10936
10937         /* Nothing to do if the enabled TC map is unchanged */
10938         if (veb->enabled_tc == tc_map)
10939                 return ret;
10940
10941         /* configure tc bandwidth */
10942         memset(&veb_bw, 0, sizeof(veb_bw));
10943         veb_bw.tc_valid_bits = tc_map;
10944         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10945         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10946                 if (tc_map & BIT_ULL(i))
10947                         veb_bw.tc_bw_share_credits[i] = 1;
10948         }
10949         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10950                                                    &veb_bw, NULL);
10951         if (ret) {
10952                 PMD_INIT_LOG(ERR,
10953                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10954                         hw->aq.asq_last_status);
10955                 return ret;
10956         }
10957
10958         memset(&ets_query, 0, sizeof(ets_query));
10959         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10960                                                    &ets_query, NULL);
10961         if (ret != I40E_SUCCESS) {
10962                 PMD_DRV_LOG(ERR,
10963                         "Failed to get switch_comp ETS configuration %u",
10964                         hw->aq.asq_last_status);
10965                 return ret;
10966         }
10967         memset(&bw_query, 0, sizeof(bw_query));
10968         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10969                                                   &bw_query, NULL);
10970         if (ret != I40E_SUCCESS) {
10971                 PMD_DRV_LOG(ERR,
10972                         "Failed to get switch_comp bandwidth configuration %u",
10973                         hw->aq.asq_last_status);
10974                 return ret;
10975         }
10976
10977         /* store and print out BW info */
10978         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10979         veb->bw_info.bw_max = ets_query.tc_bw_max;
10980         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10981         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10982         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10983                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10984                      I40E_16_BIT_WIDTH);
10985         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10986                 veb->bw_info.bw_ets_share_credits[i] =
10987                                 bw_query.tc_bw_share_credits[i];
10988                 veb->bw_info.bw_ets_credits[i] =
10989                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10990                 /* 4 bits per TC, 4th bit is reserved */
10991                 veb->bw_info.bw_ets_max[i] =
10992                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10993                                   RTE_LEN2MASK(3, uint8_t));
10994                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10995                             veb->bw_info.bw_ets_share_credits[i]);
10996                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10997                             veb->bw_info.bw_ets_credits[i]);
10998                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10999                             veb->bw_info.bw_ets_max[i]);
11000         }
11001
11002         veb->enabled_tc = tc_map;
11003
11004         return ret;
11005 }
11006
11007
11008 /*
11009  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
11010  * @vsi: VSI to be configured
11011  * @tc_map: enabled TC bitmap
11012  *
11013  * Returns 0 on success, negative value on failure
11014  */
11015 static enum i40e_status_code
11016 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
11017 {
11018         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
11019         struct i40e_vsi_context ctxt;
11020         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
11021         enum i40e_status_code ret = I40E_SUCCESS;
11022         int i;
11023
11024         /* Nothing to do if the enabled TC map is unchanged */
11025         if (vsi->enabled_tc == tc_map)
11026                 return ret;
11027
11028         /* configure tc bandwidth */
11029         memset(&bw_data, 0, sizeof(bw_data));
11030         bw_data.tc_valid_bits = tc_map;
11031         /* Enable ETS TCs with equal BW Share for now across all VSIs */
11032         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11033                 if (tc_map & BIT_ULL(i))
11034                         bw_data.tc_bw_credits[i] = 1;
11035         }
11036         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
11037         if (ret) {
11038                 PMD_INIT_LOG(ERR,
11039                         "AQ command Config VSI BW allocation per TC failed = %d",
11040                         hw->aq.asq_last_status);
11041                 goto out;
11042         }
11043         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
11044                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
11045
11046         /* Update Queue Pairs Mapping for currently enabled UPs */
11047         ctxt.seid = vsi->seid;
11048         ctxt.pf_num = hw->pf_id;
11049         ctxt.vf_num = 0;
11050         ctxt.uplink_seid = vsi->uplink_seid;
11051         ctxt.info = vsi->info;
11052         i40e_get_cap(hw);
11053         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
11054         if (ret)
11055                 goto out;
11056
11057         /* Update the VSI after updating the VSI queue-mapping information */
11058         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11059         if (ret) {
11060                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
11061                         hw->aq.asq_last_status);
11062                 goto out;
11063         }
11064         /* update the local VSI info with updated queue map */
11065         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
11066                                         sizeof(vsi->info.tc_mapping));
11067         rte_memcpy(&vsi->info.queue_mapping,
11068                         &ctxt.info.queue_mapping,
11069                 sizeof(vsi->info.queue_mapping));
11070         vsi->info.mapping_flags = ctxt.info.mapping_flags;
11071         vsi->info.valid_sections = 0;
11072
11073         /* query and update current VSI BW information */
11074         ret = i40e_vsi_get_bw_config(vsi);
11075         if (ret) {
11076                 PMD_INIT_LOG(ERR,
11077                          "Failed updating vsi bw info, err %s aq_err %s",
11078                          i40e_stat_str(hw, ret),
11079                          i40e_aq_str(hw, hw->aq.asq_last_status));
11080                 goto out;
11081         }
11082
11083         vsi->enabled_tc = tc_map;
11084
11085 out:
11086         return ret;
11087 }
11088
11089 /*
11090  * i40e_dcb_hw_configure - program the dcb setting to hw
11091  * @pf: pf the configuration is taken on
11092  * @new_cfg: new configuration
11093  * @tc_map: enabled TC bitmap
11094  *
11095  * Returns 0 on success, negative value on failure
11096  */
11097 static enum i40e_status_code
11098 i40e_dcb_hw_configure(struct i40e_pf *pf,
11099                       struct i40e_dcbx_config *new_cfg,
11100                       uint8_t tc_map)
11101 {
11102         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11103         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11104         struct i40e_vsi *main_vsi = pf->main_vsi;
11105         struct i40e_vsi_list *vsi_list;
11106         enum i40e_status_code ret;
11107         int i;
11108         uint32_t val;
11109
11110         /* Use the FW API only if FW >= v4.4 */
11111         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11112               (hw->aq.fw_maj_ver >= 5))) {
11113                 PMD_INIT_LOG(ERR,
11114                         "FW < v4.4, cannot use FW LLDP API to configure DCB");
11115                 return I40E_ERR_FIRMWARE_API_VERSION;
11116         }
11117
11118         /* Check whether reconfiguration is needed */
11119         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11120                 PMD_INIT_LOG(ERR, "No change in DCB config required.");
11121                 return I40E_SUCCESS;
11122         }
11123
11124         /* Copy the new config to the current config */
11125         *old_cfg = *new_cfg;
11126         old_cfg->etsrec = old_cfg->etscfg;
11127         ret = i40e_set_dcb_config(hw);
11128         if (ret) {
11129                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11130                          i40e_stat_str(hw, ret),
11131                          i40e_aq_str(hw, hw->aq.asq_last_status));
11132                 return ret;
11133         }
11134         /* set receive Arbiter to RR mode and ETS scheme by default */
11135         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11136                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11137                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11138                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11139                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
11140                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11141                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11142                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11143                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11144                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11145                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11146                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11147                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11148         }
11149         /* get local mib to check whether it is configured correctly */
11150         /* IEEE mode */
11151         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11152         /* Get Local DCB Config */
11153         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11154                                      &hw->local_dcbx_config);
11155
11156         /* if a VEB exists, update its TC configuration first */
11157         if (main_vsi->veb) {
11158                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11159                 if (ret)
11160                         PMD_INIT_LOG(WARNING,
11161                                  "Failed configuring TC for VEB seid=%d",
11162                                  main_vsi->veb->seid);
11163         }
11164         /* Update each VSI */
11165         i40e_vsi_config_tc(main_vsi, tc_map);
11166         if (main_vsi->veb) {
11167                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11168                         /* Besides the main VSI and VMDQ VSIs, only enable
11169                          * the default TC for other VSIs
11170                          */
11171                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11172                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11173                                                          tc_map);
11174                         else
11175                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11176                                                          I40E_DEFAULT_TCMAP);
11177                         if (ret)
11178                                 PMD_INIT_LOG(WARNING,
11179                                         "Failed configuring TC for VSI seid=%d",
11180                                         vsi_list->vsi->seid);
11181                         /* continue */
11182                 }
11183         }
11184         return I40E_SUCCESS;
11185 }
11186
11187 /*
11188  * i40e_dcb_init_configure - initial dcb config
11189  * @dev: device being configured
11190  * @sw_dcb: indicate whether dcb is sw configured or hw offload
11191  *
11192  * Returns 0 on success, negative value on failure
11193  */
11194 int
11195 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11196 {
11197         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11198         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11199         int i, ret = 0;
11200
11201         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11202                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11203                 return -ENOTSUP;
11204         }
11205
11206         /* DCB initialization:
11207          * Update DCB configuration from the Firmware and configure
11208          * LLDP MIB change event.
11209          */
11210         if (sw_dcb == TRUE) {
11211                 /* Stopping LLDP is necessary for DPDK, but it will cause
11212                  * DCB init to fail. For i40e_init_dcb(), the prerequisite
11213                  * for successful DCB initialization is that LLDP is
11214                  * enabled, so LLDP is started before DCB init and stopped
11215                  * again after initialization.
11216                  */
11217                 ret = i40e_aq_start_lldp(hw, true, NULL);
11218                 if (ret != I40E_SUCCESS)
11219                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11220
11221                 ret = i40e_init_dcb(hw, true);
11222                 /* If the LLDP agent is stopped, i40e_init_dcb is expected
11223                  * to fail with adminq status I40E_AQ_RC_EPERM; otherwise
11224                  * it should return success.
11225                  */
11226                 if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
11227                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
11228                         memset(&hw->local_dcbx_config, 0,
11229                                 sizeof(struct i40e_dcbx_config));
11230                         /* set dcb default configuration */
11231                         hw->local_dcbx_config.etscfg.willing = 0;
11232                         hw->local_dcbx_config.etscfg.maxtcs = 0;
11233                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11234                         hw->local_dcbx_config.etscfg.tsatable[0] =
11235                                                 I40E_IEEE_TSA_ETS;
11236                         /* all UPs mapping to TC0 */
11237                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11238                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11239                         hw->local_dcbx_config.etsrec =
11240                                 hw->local_dcbx_config.etscfg;
11241                         hw->local_dcbx_config.pfc.willing = 0;
11242                         hw->local_dcbx_config.pfc.pfccap =
11243                                                 I40E_MAX_TRAFFIC_CLASS;
11244                         /* FW needs one App to configure HW */
11245                         hw->local_dcbx_config.numapps = 1;
11246                         hw->local_dcbx_config.app[0].selector =
11247                                                 I40E_APP_SEL_ETHTYPE;
11248                         hw->local_dcbx_config.app[0].priority = 3;
11249                         hw->local_dcbx_config.app[0].protocolid =
11250                                                 I40E_APP_PROTOID_FCOE;
11251                         ret = i40e_set_dcb_config(hw);
11252                         if (ret) {
11253                                 PMD_INIT_LOG(ERR,
11254                                         "default dcb config failed, err = %d, aq_err = %d.",
11255                                         ret, hw->aq.asq_last_status);
11256                                 return -ENOSYS;
11257                         }
11258                 } else {
11259                         PMD_INIT_LOG(ERR,
11260                                 "DCB initialization in FW failed, err = %d, aq_err = %d.",
11261                                 ret, hw->aq.asq_last_status);
11262                         return -ENOTSUP;
11263                 }
11264
11265                 if (i40e_need_stop_lldp(dev)) {
11266                         ret = i40e_aq_stop_lldp(hw, true, true, NULL);
11267                         if (ret != I40E_SUCCESS)
11268                                 PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
11269                 }
11270         } else {
11271                 ret = i40e_aq_start_lldp(hw, true, NULL);
11272                 if (ret != I40E_SUCCESS)
11273                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11274
11275                 ret = i40e_init_dcb(hw, true);
11276                 if (!ret) {
11277                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11278                                 PMD_INIT_LOG(ERR,
11279                                         "HW doesn't support DCBX offload.");
11280                                 return -ENOTSUP;
11281                         }
11282                 } else {
11283                         PMD_INIT_LOG(ERR,
11284                                 "DCBX configuration failed, err = %d, aq_err = %d.",
11285                                 ret, hw->aq.asq_last_status);
11286                         return -ENOTSUP;
11287                 }
11288         }
11289         return 0;
11290 }
11291
11292 /*
11293  * i40e_dcb_setup - setup dcb related config
11294  * @dev: device being configured
11295  *
11296  * Returns 0 on success, negative value on failure
11297  */
11298 static int
11299 i40e_dcb_setup(struct rte_eth_dev *dev)
11300 {
11301         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11302         struct i40e_dcbx_config dcb_cfg;
11303         uint8_t tc_map = 0;
11304         int ret = 0;
11305
11306         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11307                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11308                 return -ENOTSUP;
11309         }
11310
11311         if (pf->vf_num != 0)
11312                 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDq VSIs.");
11313
11314         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11315         if (ret) {
11316                 PMD_INIT_LOG(ERR, "invalid dcb config");
11317                 return -EINVAL;
11318         }
11319         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11320         if (ret) {
11321                 PMD_INIT_LOG(ERR, "dcb sw configuration failed");
11322                 return -ENOSYS;
11323         }
11324
11325         return 0;
11326 }
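/*
 * Illustrative application-side configuration that exercises this path
 * (a sketch against the ethdev API of this tree, not tested code):
 *
 *	struct rte_eth_conf conf = { 0 };
 *	int i;
 *
 *	conf.rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
 *	conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
 *	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
 *		conf.rx_adv_conf.dcb_rx_conf.dcb_tc[i] = i % 4;
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */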
11327
11328 static int
11329 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11330                       struct rte_eth_dcb_info *dcb_info)
11331 {
11332         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11333         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11334         struct i40e_vsi *vsi = pf->main_vsi;
11335         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11336         uint16_t bsf, tc_mapping;
11337         int i, j = 0;
11338
11339         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11340                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11341         else
11342                 dcb_info->nb_tcs = 1;
11343         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11344                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11345         for (i = 0; i < dcb_info->nb_tcs; i++)
11346                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11347
11348         /* get queue mapping if vmdq is disabled */
11349         if (!pf->nb_cfg_vmdq_vsi) {
11350                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11351                         if (!(vsi->enabled_tc & (1 << i)))
11352                                 continue;
11353                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11354                         dcb_info->tc_queue.tc_rxq[j][i].base =
11355                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11356                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11357                         dcb_info->tc_queue.tc_txq[j][i].base =
11358                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11359                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11360                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11361                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11362                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11363                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11364                 }
11365                 return 0;
11366         }
11367
11368         /* get queue mapping if vmdq is enabled */
11369         do {
11370                 vsi = pf->vmdq[j].vsi;
11371                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11372                         if (!(vsi->enabled_tc & (1 << i)))
11373                                 continue;
11374                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11375                         dcb_info->tc_queue.tc_rxq[j][i].base =
11376                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11377                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11378                         dcb_info->tc_queue.tc_txq[j][i].base =
11379                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11380                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11381                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11382                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11383                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11384                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11385                 }
11386                 j++;
11387         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11388         return 0;
11389 }
11390
11391 static int
11392 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11393 {
11394         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11395         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11396         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11397         uint16_t msix_intr;
11398
11399         msix_intr = intr_handle->intr_vec[queue_id];
11400         if (msix_intr == I40E_MISC_VEC_ID)
11401                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11402                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
11403                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11404                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11405         else
11406                 I40E_WRITE_REG(hw,
11407                                I40E_PFINT_DYN_CTLN(msix_intr -
11408                                                    I40E_RX_VEC_START),
11409                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
11410                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11411                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11412
11413         I40E_WRITE_FLUSH(hw);
11414         rte_intr_ack(&pci_dev->intr_handle);
11415
11416         return 0;
11417 }
11418
11419 static int
11420 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11421 {
11422         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11423         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11424         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11425         uint16_t msix_intr;
11426
11427         msix_intr = intr_handle->intr_vec[queue_id];
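        /* Writing only the ITR_INDX field (index 3, i.e. "no ITR update")
         * leaves INTENA clear, which masks the vector.
         */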
11428         if (msix_intr == I40E_MISC_VEC_ID)
11429                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11430                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11431         else
11432                 I40E_WRITE_REG(hw,
11433                                I40E_PFINT_DYN_CTLN(msix_intr -
11434                                                    I40E_RX_VEC_START),
11435                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11436         I40E_WRITE_FLUSH(hw);
11437
11438         return 0;
11439 }
11440
11441 /**
11442  * Check whether a register offset is valid for the given MAC type.
11443  * The register ranges below are valid on X722 only:
11444  * 0x2b800--0x2bb00
11445  * 0x38700--0x38a00
11446  * 0x3d800--0x3db00
11447  * 0x208e00--0x209000
11448  * 0x20be00--0x20c000
11449  * 0x263c00--0x264000
11450  * 0x265c00--0x266000
11451  */
11452 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
11453 {
11454         if ((type != I40E_MAC_X722) &&
11455             ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
11456              (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
11457              (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
11458              (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
11459              (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
11460              (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
11461              (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
11462                 return 0;
11463         else
11464                 return 1;
11465 }
11466
11467 static int i40e_get_regs(struct rte_eth_dev *dev,
11468                          struct rte_dev_reg_info *regs)
11469 {
11470         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11471         uint32_t *ptr_data = regs->data;
11472         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11473         const struct i40e_reg_info *reg_info;
11474
11475         if (ptr_data == NULL) {
11476                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11477                 regs->width = sizeof(uint32_t);
11478                 return 0;
11479         }
11480
11481         /* The first few registers have to be read using AQ operations */
11482         reg_idx = 0;
11483         while (i40e_regs_adminq[reg_idx].name) {
11484                 reg_info = &i40e_regs_adminq[reg_idx++];
11485                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11486                         for (arr_idx2 = 0;
11487                                         arr_idx2 <= reg_info->count2;
11488                                         arr_idx2++) {
11489                                 reg_offset = arr_idx * reg_info->stride1 +
11490                                         arr_idx2 * reg_info->stride2;
11491                                 reg_offset += reg_info->base_addr;
11492                                 ptr_data[reg_offset >> 2] =
11493                                         i40e_read_rx_ctl(hw, reg_offset);
11494                         }
11495         }
11496
11497         /* The remaining registers can be read using primitives */
11498         reg_idx = 0;
11499         while (i40e_regs_others[reg_idx].name) {
11500                 reg_info = &i40e_regs_others[reg_idx++];
11501                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11502                         for (arr_idx2 = 0;
11503                                         arr_idx2 <= reg_info->count2;
11504                                         arr_idx2++) {
11505                                 reg_offset = arr_idx * reg_info->stride1 +
11506                                         arr_idx2 * reg_info->stride2;
11507                                 reg_offset += reg_info->base_addr;
11508                                 if (!i40e_valid_regs(hw->mac.type, reg_offset))
11509                                         ptr_data[reg_offset >> 2] = 0;
11510                                 else
11511                                         ptr_data[reg_offset >> 2] =
11512                                                 I40E_READ_REG(hw, reg_offset);
11513                         }
11514         }
11515
11516         return 0;
11517 }
11518
11519 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11520 {
11521         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11522
11523         /* Convert word count to byte count */
11524         return hw->nvm.sr_size << 1;
11525 }
11526
11527 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11528                            struct rte_dev_eeprom_info *eeprom)
11529 {
11530         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11531         uint16_t *data = eeprom->data;
11532         uint16_t offset, length, cnt_words;
11533         int ret_code;
11534
11535         offset = eeprom->offset >> 1;
11536         length = eeprom->length >> 1;
11537         cnt_words = length;
11538
11539         if (offset > hw->nvm.sr_size ||
11540                 offset + length > hw->nvm.sr_size) {
11541                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11542                 return -EINVAL;
11543         }
11544
11545         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11546
11547         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11548         if (ret_code != I40E_SUCCESS || cnt_words != length) {
11549                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
11550                 return -EIO;
11551         }
11552
11553         return 0;
11554 }
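
/*
 * Illustrative usage sketch (not part of the driver): reading the first
 * 64 bytes of the NVM via the ethdev API. Offset and length are given in
 * bytes and halved above to address 16-bit NVM words.
 *
 *     uint16_t words[32];
 *     struct rte_dev_eeprom_info info = {
 *             .data = words, .offset = 0, .length = sizeof(words),
 *     };
 *
 *     rte_eth_dev_get_eeprom(port_id, &info);
 */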
11555
11556 static int i40e_get_module_info(struct rte_eth_dev *dev,
11557                                 struct rte_eth_dev_module_info *modinfo)
11558 {
11559         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11560         uint32_t sff8472_comp = 0;
11561         uint32_t sff8472_swap = 0;
11562         uint32_t sff8636_rev = 0;
11563         i40e_status status;
11564         uint32_t type = 0;
11565
11566         /* Check if firmware supports reading module EEPROM. */
11567         if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11568                 PMD_DRV_LOG(ERR,
11569                             "Module EEPROM memory read not supported. "
11570                             "Please update the NVM image.");
11571                 return -EINVAL;
11572         }
11573
11574         status = i40e_update_link_info(hw);
11575         if (status)
11576                 return -EIO;
11577
11578         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11579                 PMD_DRV_LOG(ERR,
11580                             "Cannot read module EEPROM memory. "
11581                             "No module connected.");
11582                 return -EINVAL;
11583         }
11584
11585         type = hw->phy.link_info.module_type[0];
11586
11587         switch (type) {
11588         case I40E_MODULE_TYPE_SFP:
11589                 status = i40e_aq_get_phy_register(hw,
11590                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11591                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
11592                                 I40E_MODULE_SFF_8472_COMP,
11593                                 &sff8472_comp, NULL);
11594                 if (status)
11595                         return -EIO;
11596
11597                 status = i40e_aq_get_phy_register(hw,
11598                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11599                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
11600                                 I40E_MODULE_SFF_8472_SWAP,
11601                                 &sff8472_swap, NULL);
11602                 if (status)
11603                         return -EIO;
11604
11605                 /* Check if the module requires address swap to access
11606                  * the other EEPROM memory page.
11607                  */
11608                 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11609                         PMD_DRV_LOG(WARNING,
11610                                     "Module address swap to access "
11611                                     "page 0xA2 is not supported.");
11612                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11613                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11614                 } else if (sff8472_comp == 0x00) {
11615                         /* Module is not SFF-8472 compliant */
11616                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11617                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11618                 } else {
11619                         modinfo->type = RTE_ETH_MODULE_SFF_8472;
11620                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11621                 }
11622                 break;
11623         case I40E_MODULE_TYPE_QSFP_PLUS:
11624                 /* Read from memory page 0. */
11625                 status = i40e_aq_get_phy_register(hw,
11626                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11627                                 0, 1,
11628                                 I40E_MODULE_REVISION_ADDR,
11629                                 &sff8636_rev, NULL);
11630                 if (status)
11631                         return -EIO;
11632                 /* Determine revision compliance byte */
11633                 if (sff8636_rev > 0x02) {
11634                         /* Module is SFF-8636 compliant */
11635                         modinfo->type = RTE_ETH_MODULE_SFF_8636;
11636                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11637                 } else {
11638                         modinfo->type = RTE_ETH_MODULE_SFF_8436;
11639                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11640                 }
11641                 break;
11642         case I40E_MODULE_TYPE_QSFP28:
11643                 modinfo->type = RTE_ETH_MODULE_SFF_8636;
11644                 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11645                 break;
11646         default:
11647                 PMD_DRV_LOG(ERR, "Module type unrecognized");
11648                 return -EINVAL;
11649         }
11650         return 0;
11651 }
11652
11653 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11654                                   struct rte_dev_eeprom_info *info)
11655 {
11656         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11657         bool is_sfp = false;
11658         i40e_status status;
11659         uint8_t *data;
11660         uint32_t value = 0;
11661         uint32_t i;
11662
11663         if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11664                 is_sfp = true;
11665
11666         data = info->data;
11667         for (i = 0; i < info->length; i++) {
11668                 uint32_t offset = i + info->offset;
11669                 uint32_t addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11670
11671                 /* Check if we need to access the other memory page */
11672                 if (is_sfp) {
11673                         if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11674                                 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11675                                 addr = I40E_I2C_EEPROM_DEV_ADDR2;
11676                         }
11677                 } else {
11678                         while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11679                                 /* Compute memory page number and offset. */
11680                                 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11681                                 addr++;
11682                         }
11683                 }
11684                 status = i40e_aq_get_phy_register(hw,
11685                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11686                                 addr, 1, offset, &value, NULL);
11687                 if (status)
11688                         return -EIO;
11689                 data[i] = (uint8_t)value;
11690         }
11691         return 0;
11692 }
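
/*
 * Illustrative usage sketch (not part of the driver): the usual two-step
 * flow is to query the module type and EEPROM length first, then read
 * the module EEPROM with the reported length.
 *
 *     struct rte_eth_dev_module_info modinfo;
 *     struct rte_dev_eeprom_info info = { 0 };
 *
 *     rte_eth_dev_get_module_info(port_id, &modinfo);
 *     info.length = modinfo.eeprom_len;
 *     info.data = malloc(info.length);
 *     rte_eth_dev_get_module_eeprom(port_id, &info);
 */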
11693
11694 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11695                                      struct rte_ether_addr *mac_addr)
11696 {
11697         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11698         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11699         struct i40e_vsi *vsi = pf->main_vsi;
11700         struct i40e_mac_filter_info mac_filter;
11701         struct i40e_mac_filter *f;
11702         int ret;
11703
11704         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
11705                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11706                 return -EINVAL;
11707         }
11708
11709         TAILQ_FOREACH(f, &vsi->mac_list, next) {
11710                 if (rte_is_same_ether_addr(&pf->dev_addr,
11711                                                 &f->mac_info.mac_addr))
11712                         break;
11713         }
11714
11715         if (f == NULL) {
11716                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11717                 return -EIO;
11718         }
11719
11720         mac_filter = f->mac_info;
11721         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11722         if (ret != I40E_SUCCESS) {
11723                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11724                 return -EIO;
11725         }
11726         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11727         ret = i40e_vsi_add_mac(vsi, &mac_filter);
11728         if (ret != I40E_SUCCESS) {
11729                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
11730                 return -EIO;
11731         }
11732         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11733
11734         ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11735                                         mac_addr->addr_bytes, NULL);
11736         if (ret != I40E_SUCCESS) {
11737                 PMD_DRV_LOG(ERR, "Failed to change mac");
11738                 return -EIO;
11739         }
11740
11741         return 0;
11742 }
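
/*
 * Illustrative usage sketch (not part of the driver): setting a locally
 * administered address through the ethdev API, which lands in the
 * handler above.
 *
 *     struct rte_ether_addr addr = {
 *             .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *     };
 *
 *     rte_eth_dev_default_mac_addr_set(port_id, &addr);
 */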
11743
11744 static int
11745 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11746 {
11747         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11748         struct rte_eth_dev_data *dev_data = pf->dev_data;
11749         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11750         int ret = 0;
11751
11752         /* check if mtu is within the allowed range */
11753         if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
11754                 return -EINVAL;
11755
11756         /* MTU setting is forbidden if the port is started */
11757         if (dev_data->dev_started) {
11758                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11759                             dev_data->port_id);
11760                 return -EBUSY;
11761         }
11762
11763         if (frame_size > I40E_ETH_MAX_LEN)
11764                 dev_data->dev_conf.rxmode.offloads |=
11765                         DEV_RX_OFFLOAD_JUMBO_FRAME;
11766         else
11767                 dev_data->dev_conf.rxmode.offloads &=
11768                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
11769
11770         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11771
11772         return ret;
11773 }
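
/*
 * Illustrative usage sketch (not part of the driver): per the -EBUSY
 * check above, the port must be stopped before the MTU can change.
 *
 *     rte_eth_dev_stop(port_id);
 *     rte_eth_dev_set_mtu(port_id, 9000);
 *     rte_eth_dev_start(port_id);
 */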
11774
11775 /* Restore ethertype filter */
11776 static void
11777 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11778 {
11779         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11780         struct i40e_ethertype_filter_list
11781                 *ethertype_list = &pf->ethertype.ethertype_list;
11782         struct i40e_ethertype_filter *f;
11783         struct i40e_control_filter_stats stats = {0};
11784         uint16_t flags;
11785
11786         TAILQ_FOREACH(f, ethertype_list, rules) {
11787                 flags = 0;
11788                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11789                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11790                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11791                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11792                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11793
11794                 memset(&stats, 0, sizeof(stats));
11795                 i40e_aq_add_rem_control_packet_filter(hw,
11796                                             f->input.mac_addr.addr_bytes,
11797                                             f->input.ether_type,
11798                                             flags, pf->main_vsi->seid,
11799                                             f->queue, 1, &stats, NULL);
11800         }
11801         PMD_DRV_LOG(INFO, "Ethertype filter:"
11802                     " mac_etype_used = %u, etype_used = %u,"
11803                     " mac_etype_free = %u, etype_free = %u",
11804                     stats.mac_etype_used, stats.etype_used,
11805                     stats.mac_etype_free, stats.etype_free);
11806 }
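
/*
 * Example flag composition for the loop above: a rule created with
 * RTE_ETHTYPE_FLAGS_DROP and without RTE_ETHTYPE_FLAGS_MAC is restored
 * with
 *
 *     flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
 *             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
 *             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
 */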
11807
11808 /* Restore tunnel filter */
11809 static void
11810 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11811 {
11812         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11813         struct i40e_vsi *vsi;
11814         struct i40e_pf_vf *vf;
11815         struct i40e_tunnel_filter_list
11816                 *tunnel_list = &pf->tunnel.tunnel_list;
11817         struct i40e_tunnel_filter *f;
11818         struct i40e_aqc_cloud_filters_element_bb cld_filter;
11819         bool big_buffer = false;
11820
11821         TAILQ_FOREACH(f, tunnel_list, rules) {
11822                 if (!f->is_to_vf) {
11823                         vsi = pf->main_vsi;
11824                 } else {
11825                         vf = &pf->vfs[f->vf_id];
11826                         vsi = vf->vsi;
11827                 }
11828                 memset(&cld_filter, 0, sizeof(cld_filter));
11829                 rte_ether_addr_copy((struct rte_ether_addr *)
11830                                 &f->input.outer_mac,
11831                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
11832                 rte_ether_addr_copy((struct rte_ether_addr *)
11833                                 &f->input.inner_mac,
11834                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
11835                 cld_filter.element.inner_vlan = f->input.inner_vlan;
11836                 cld_filter.element.flags = f->input.flags;
11837                 cld_filter.element.tenant_id = f->input.tenant_id;
11838                 cld_filter.element.queue_number = f->queue;
11839                 rte_memcpy(cld_filter.general_fields,
11840                            f->input.general_fields,
11841                            sizeof(f->input.general_fields));
11842                 big_buffer = false; /* decide buffer size per filter */
11843                 if (((f->input.flags &
11844                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11845                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11846                     ((f->input.flags &
11847                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11848                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11849                     ((f->input.flags &
11850                      I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11851                      I40E_AQC_ADD_CLOUD_FILTER_0X10))
11852                         big_buffer = true;
11853
11854                 if (big_buffer)
11855                         i40e_aq_add_cloud_filters_bb(hw,
11856                                         vsi->seid, &cld_filter, 1);
11857                 else
11858                         i40e_aq_add_cloud_filters(hw, vsi->seid,
11859                                                   &cld_filter.element, 1);
11860         }
11861 }
11862
11863 static void
11864 i40e_filter_restore(struct i40e_pf *pf)
11865 {
11866         i40e_ethertype_filter_restore(pf);
11867         i40e_tunnel_filter_restore(pf);
11868         i40e_fdir_filter_restore(pf);
11869         (void)i40e_hash_filter_restore(pf);
11870 }
11871
11872 bool
11873 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11874 {
11875         if (strcmp(dev->device->driver->name, drv->driver.name))
11876                 return false;
11877
11878         return true;
11879 }
11880
11881 bool
11882 is_i40e_supported(struct rte_eth_dev *dev)
11883 {
11884         return is_device_supported(dev, &rte_i40e_pmd);
11885 }
11886
11887 struct i40e_customized_pctype*
11888 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11889 {
11890         int i;
11891
11892         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11893                 if (pf->customized_pctype[i].index == index)
11894                         return &pf->customized_pctype[i];
11895         }
11896         return NULL;
11897 }
11898
11899 static int
11900 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11901                               uint32_t pkg_size, uint32_t proto_num,
11902                               struct rte_pmd_i40e_proto_info *proto,
11903                               enum rte_pmd_i40e_package_op op)
11904 {
11905         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11906         uint32_t pctype_num;
11907         struct rte_pmd_i40e_ptype_info *pctype;
11908         uint32_t buff_size;
11909         struct i40e_customized_pctype *new_pctype = NULL;
11910         uint8_t proto_id;
11911         uint8_t pctype_value;
11912         char name[64];
11913         uint32_t i, j, n;
11914         int ret;
11915
11916         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11917             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11918                 PMD_DRV_LOG(ERR, "Unsupported operation.");
11919                 return -1;
11920         }
11921
11922         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11923                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
11924                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11925         if (ret) {
11926                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
11927                 return -1;
11928         }
11929         if (!pctype_num) {
11930                 PMD_DRV_LOG(INFO, "No new pctype added");
11931                 return -1;
11932         }
11933
11934         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
11935         pctype = rte_zmalloc("new_pctype", buff_size, 0);
11936         if (!pctype) {
11937                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11938                 return -1;
11939         }
11940         /* get information about new pctype list */
11941         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11942                                         (uint8_t *)pctype, buff_size,
11943                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11944         if (ret) {
11945                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
11946                 rte_free(pctype);
11947                 return -1;
11948         }
11949
11950         /* Update customized pctype. */
11951         for (i = 0; i < pctype_num; i++) {
11952                 pctype_value = pctype[i].ptype_id;
11953                 memset(name, 0, sizeof(name));
11954                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11955                         proto_id = pctype[i].protocols[j];
11956                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11957                                 continue;
11958                         for (n = 0; n < proto_num; n++) {
11959                                 if (proto[n].proto_id != proto_id)
11960                                         continue;
11961                                 strlcat(name, proto[n].name, sizeof(name));
11962                                 strlcat(name, "_", sizeof(name));
11963                                 break;
11964                         }
11965                 }
11966                 if (name[0]) name[strlen(name) - 1] = '\0'; /* strip trailing '_' */
11967                 PMD_DRV_LOG(INFO, "name = %s", name);
11968                 if (!strcmp(name, "GTPC"))
11969                         new_pctype =
11970                                 i40e_find_customized_pctype(pf,
11971                                                       I40E_CUSTOMIZED_GTPC);
11972                 else if (!strcmp(name, "GTPU_IPV4"))
11973                         new_pctype =
11974                                 i40e_find_customized_pctype(pf,
11975                                                    I40E_CUSTOMIZED_GTPU_IPV4);
11976                 else if (!strcmp(name, "GTPU_IPV6"))
11977                         new_pctype =
11978                                 i40e_find_customized_pctype(pf,
11979                                                    I40E_CUSTOMIZED_GTPU_IPV6);
11980                 else if (!strcmp(name, "GTPU"))
11981                         new_pctype =
11982                                 i40e_find_customized_pctype(pf,
11983                                                       I40E_CUSTOMIZED_GTPU);
11984                 else if (!strcmp(name, "IPV4_L2TPV3"))
11985                         new_pctype =
11986                                 i40e_find_customized_pctype(pf,
11987                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
11988                 else if (!strcmp(name, "IPV6_L2TPV3"))
11989                         new_pctype =
11990                                 i40e_find_customized_pctype(pf,
11991                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
11992                 else if (!strcmp(name, "IPV4_ESP"))
11993                         new_pctype =
11994                                 i40e_find_customized_pctype(pf,
11995                                                 I40E_CUSTOMIZED_ESP_IPV4);
11996                 else if (!strcmp(name, "IPV6_ESP"))
11997                         new_pctype =
11998                                 i40e_find_customized_pctype(pf,
11999                                                 I40E_CUSTOMIZED_ESP_IPV6);
12000                 else if (!strcmp(name, "IPV4_UDP_ESP"))
12001                         new_pctype =
12002                                 i40e_find_customized_pctype(pf,
12003                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
12004                 else if (!strcmp(name, "IPV6_UDP_ESP"))
12005                         new_pctype =
12006                                 i40e_find_customized_pctype(pf,
12007                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
12008                 else if (!strcmp(name, "IPV4_AH"))
12009                         new_pctype =
12010                                 i40e_find_customized_pctype(pf,
12011                                                 I40E_CUSTOMIZED_AH_IPV4);
12012                 else if (!strcmp(name, "IPV6_AH"))
12013                         new_pctype =
12014                                 i40e_find_customized_pctype(pf,
12015                                                 I40E_CUSTOMIZED_AH_IPV6);
12016                 if (new_pctype) {
12017                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
12018                                 new_pctype->pctype = pctype_value;
12019                                 new_pctype->valid = true;
12020                         } else {
12021                                 new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
12022                                 new_pctype->valid = false;
12023                         }
12024                 }
12025         }
12026
12027         rte_free(pctype);
12028         return 0;
12029 }
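
/*
 * Worked example of the name matching above: a pctype whose protocol
 * list resolves to { "GTPU", "IPV4" } is concatenated to "GTPU_IPV4_",
 * the trailing '_' is stripped, and "GTPU_IPV4" then selects
 * I40E_CUSTOMIZED_GTPU_IPV4.
 */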
12030
12031 static int
12032 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
12033                              uint32_t pkg_size, uint32_t proto_num,
12034                              struct rte_pmd_i40e_proto_info *proto,
12035                              enum rte_pmd_i40e_package_op op)
12036 {
12037         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
12038         uint16_t port_id = dev->data->port_id;
12039         uint32_t ptype_num;
12040         struct rte_pmd_i40e_ptype_info *ptype;
12041         uint32_t buff_size;
12042         uint8_t proto_id;
12043         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
12044         uint32_t i, j, n;
12045         bool in_tunnel;
12046         int ret;
12047
12048         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12049             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12050                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12051                 return -1;
12052         }
12053
12054         if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
12055                 rte_pmd_i40e_ptype_mapping_reset(port_id);
12056                 return 0;
12057         }
12058
12059         /* get information about new ptype num */
12060         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12061                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
12062                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
12063         if (ret) {
12064                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
12065                 return ret;
12066         }
12067         if (!ptype_num) {
12068                 PMD_DRV_LOG(INFO, "No new ptype added");
12069                 return -1;
12070         }
12071
12072         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
12073         ptype = rte_zmalloc("new_ptype", buff_size, 0);
12074         if (!ptype) {
12075                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12076                 return -1;
12077         }
12078
12079         /* get information about new ptype list */
12080         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12081                                         (uint8_t *)ptype, buff_size,
12082                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
12083         if (ret) {
12084                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
12085                 rte_free(ptype);
12086                 return ret;
12087         }
12088
12089         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
12090         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
12091         if (!ptype_mapping) {
12092                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12093                 rte_free(ptype);
12094                 return -1;
12095         }
12096
12097         /* Update ptype mapping table. */
12098         for (i = 0; i < ptype_num; i++) {
12099                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
12100                 ptype_mapping[i].sw_ptype = 0;
12101                 in_tunnel = false;
12102                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12103                         proto_id = ptype[i].protocols[j];
12104                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12105                                 continue;
12106                         for (n = 0; n < proto_num; n++) {
12107                                 if (proto[n].proto_id != proto_id)
12108                                         continue;
12109                                 memset(name, 0, sizeof(name));
12110                                 strlcpy(name, proto[n].name, sizeof(name));
12111                                 PMD_DRV_LOG(INFO, "name = %s", name);
12112                                 if (!strncasecmp(name, "PPPOE", 5))
12113                                         ptype_mapping[i].sw_ptype |=
12114                                                 RTE_PTYPE_L2_ETHER_PPPOE;
12115                                 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12116                                          !in_tunnel) {
12117                                         ptype_mapping[i].sw_ptype |=
12118                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12119                                         ptype_mapping[i].sw_ptype |=
12120                                                 RTE_PTYPE_L4_FRAG;
12121                                 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12122                                            in_tunnel) {
12123                                         ptype_mapping[i].sw_ptype |=
12124                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12125                                         ptype_mapping[i].sw_ptype |=
12126                                                 RTE_PTYPE_INNER_L4_FRAG;
12127                                 } else if (!strncasecmp(name, "OIPV4", 5)) {
12128                                         ptype_mapping[i].sw_ptype |=
12129                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12130                                         in_tunnel = true;
12131                                 } else if (!strncasecmp(name, "IPV4", 4) &&
12132                                            !in_tunnel)
12133                                         ptype_mapping[i].sw_ptype |=
12134                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12135                                 else if (!strncasecmp(name, "IPV4", 4) &&
12136                                          in_tunnel)
12137                                         ptype_mapping[i].sw_ptype |=
12138                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12139                                 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12140                                          !in_tunnel) {
12141                                         ptype_mapping[i].sw_ptype |=
12142                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12143                                         ptype_mapping[i].sw_ptype |=
12144                                                 RTE_PTYPE_L4_FRAG;
12145                                 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12146                                            in_tunnel) {
12147                                         ptype_mapping[i].sw_ptype |=
12148                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12149                                         ptype_mapping[i].sw_ptype |=
12150                                                 RTE_PTYPE_INNER_L4_FRAG;
12151                                 } else if (!strncasecmp(name, "OIPV6", 5)) {
12152                                         ptype_mapping[i].sw_ptype |=
12153                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12154                                         in_tunnel = true;
12155                                 } else if (!strncasecmp(name, "IPV6", 4) &&
12156                                            !in_tunnel)
12157                                         ptype_mapping[i].sw_ptype |=
12158                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12159                                 else if (!strncasecmp(name, "IPV6", 4) &&
12160                                          in_tunnel)
12161                                         ptype_mapping[i].sw_ptype |=
12162                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12163                                 else if (!strncasecmp(name, "UDP", 3) &&
12164                                          !in_tunnel)
12165                                         ptype_mapping[i].sw_ptype |=
12166                                                 RTE_PTYPE_L4_UDP;
12167                                 else if (!strncasecmp(name, "UDP", 3) &&
12168                                          in_tunnel)
12169                                         ptype_mapping[i].sw_ptype |=
12170                                                 RTE_PTYPE_INNER_L4_UDP;
12171                                 else if (!strncasecmp(name, "TCP", 3) &&
12172                                          !in_tunnel)
12173                                         ptype_mapping[i].sw_ptype |=
12174                                                 RTE_PTYPE_L4_TCP;
12175                                 else if (!strncasecmp(name, "TCP", 3) &&
12176                                          in_tunnel)
12177                                         ptype_mapping[i].sw_ptype |=
12178                                                 RTE_PTYPE_INNER_L4_TCP;
12179                                 else if (!strncasecmp(name, "SCTP", 4) &&
12180                                          !in_tunnel)
12181                                         ptype_mapping[i].sw_ptype |=
12182                                                 RTE_PTYPE_L4_SCTP;
12183                                 else if (!strncasecmp(name, "SCTP", 4) &&
12184                                          in_tunnel)
12185                                         ptype_mapping[i].sw_ptype |=
12186                                                 RTE_PTYPE_INNER_L4_SCTP;
12187                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12188                                           !strncasecmp(name, "ICMPV6", 6)) &&
12189                                          !in_tunnel)
12190                                         ptype_mapping[i].sw_ptype |=
12191                                                 RTE_PTYPE_L4_ICMP;
12192                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12193                                           !strncasecmp(name, "ICMPV6", 6)) &&
12194                                          in_tunnel)
12195                                         ptype_mapping[i].sw_ptype |=
12196                                                 RTE_PTYPE_INNER_L4_ICMP;
12197                                 else if (!strncasecmp(name, "GTPC", 4)) {
12198                                         ptype_mapping[i].sw_ptype |=
12199                                                 RTE_PTYPE_TUNNEL_GTPC;
12200                                         in_tunnel = true;
12201                                 } else if (!strncasecmp(name, "GTPU", 4)) {
12202                                         ptype_mapping[i].sw_ptype |=
12203                                                 RTE_PTYPE_TUNNEL_GTPU;
12204                                         in_tunnel = true;
12205                                 } else if (!strncasecmp(name, "ESP", 3)) {
12206                                         ptype_mapping[i].sw_ptype |=
12207                                                 RTE_PTYPE_TUNNEL_ESP;
12208                                         in_tunnel = true;
12209                                 } else if (!strncasecmp(name, "GRENAT", 6)) {
12210                                         ptype_mapping[i].sw_ptype |=
12211                                                 RTE_PTYPE_TUNNEL_GRENAT;
12212                                         in_tunnel = true;
12213                                 } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12214                                            !strncasecmp(name, "L2TPV2", 6) ||
12215                                            !strncasecmp(name, "L2TPV3", 6)) {
12216                                         ptype_mapping[i].sw_ptype |=
12217                                                 RTE_PTYPE_TUNNEL_L2TP;
12218                                         in_tunnel = true;
12219                                 }
12220
12221                                 break;
12222                         }
12223                 }
12224         }
12225
12226         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
12227                                                 ptype_num, 0);
12228         if (ret)
12229                 PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
12230
12231         rte_free(ptype_mapping);
12232         rte_free(ptype);
12233         return ret;
12234 }
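
/*
 * Worked example of the mapping above: a hardware ptype whose protocol
 * list is { OIPV4, GTPU, IPV4, UDP } accumulates
 *
 *     sw_ptype = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |        (OIPV4, enters tunnel)
 *                RTE_PTYPE_TUNNEL_GTPU |                (GTPU)
 *                RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |  (IPV4, in tunnel)
 *                RTE_PTYPE_INNER_L4_UDP;                (UDP, in tunnel)
 */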
12235
12236 void
12237 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
12238                             uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
12239 {
12240         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12241         uint32_t proto_num;
12242         struct rte_pmd_i40e_proto_info *proto;
12243         uint32_t buff_size;
12244         uint32_t i;
12245         int ret;
12246
12247         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12248             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12249                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12250                 return;
12251         }
12252
12253         /* get information about protocol number */
12254         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12255                                        (uint8_t *)&proto_num, sizeof(proto_num),
12256                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
12257         if (ret) {
12258                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
12259                 return;
12260         }
12261         if (!proto_num) {
12262                 PMD_DRV_LOG(INFO, "No new protocol added");
12263                 return;
12264         }
12265
12266         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
12267         proto = rte_zmalloc("new_proto", buff_size, 0);
12268         if (!proto) {
12269                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12270                 return;
12271         }
12272
12273         /* get information about protocol list */
12274         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12275                                         (uint8_t *)proto, buff_size,
12276                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
12277         if (ret) {
12278                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
12279                 rte_free(proto);
12280                 return;
12281         }
12282
12283         /* Check if GTP is supported. */
12284         for (i = 0; i < proto_num; i++) {
12285                 if (!strncmp(proto[i].name, "GTP", 3)) {
12286                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12287                                 pf->gtp_support = true;
12288                         else
12289                                 pf->gtp_support = false;
12290                         break;
12291                 }
12292         }
12293
12294         /* Check if ESP is supported. */
12295         for (i = 0; i < proto_num; i++) {
12296                 if (!strncmp(proto[i].name, "ESP", 3)) {
12297                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12298                                 pf->esp_support = true;
12299                         else
12300                                 pf->esp_support = false;
12301                         break;
12302                 }
12303         }
12304
12305         /* Update customized pctype info */
12306         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12307                                             proto_num, proto, op);
12308         if (ret)
12309                 PMD_DRV_LOG(INFO, "No pctype is updated.");
12310
12311         /* Update customized ptype info */
12312         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12313                                            proto_num, proto, op);
12314         if (ret)
12315                 PMD_DRV_LOG(INFO, "No ptype is updated.");
12316
12317         rte_free(proto);
12318 }
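
/*
 * Illustrative usage sketch (not part of the driver): this update runs
 * when a DDP profile is written or removed through the PMD-specific
 * API, e.g.
 *
 *     rte_pmd_i40e_process_ddp_package(port_id, pkg_buf, pkg_size,
 *                                      RTE_PMD_I40E_PKG_OP_WR_ADD);
 *
 * where pkg_buf/pkg_size hold a profile image read by the caller.
 */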
12319
12320 /* Create a QinQ cloud filter
12321  *
12322  * The Fortville NIC has limited resources for tunnel filters,
12323  * so we can only reuse existing filters.
12324  *
12325  * In step 1 we define which Field Vector fields can be used for
12326  * filter types.
12327  * As we do not have the inner tag defined as a field,
12328  * we have to define it first, by reusing one of L1 entries.
12329  *
12330  * In step 2 we replace one of the existing filter types with
12331  * a new one for QinQ.
12332  * As we are reusing an L1 entry and replacing an L2 entry, some of the
12333  * default filter types will disappear, depending on which entries we reuse.
12334  *
12335  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
12336  *
12337  * 1.   Create L1 filter of outer vlan (12b) which will be in use
12338  *              later when we define the cloud filter.
12339  *      a.      Valid_flags.replace_cloud = 0
12340  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
12341  *      c.      New_filter = 0x10
12342  *      d.      TR bit = 0xff (optional, not used here)
12343  *      e.      Buffer – 2 entries:
12344  *              i.      Byte 0 = 8 (outer vlan FV index).
12345  *                      Byte 1 = 0 (rsv)
12346  *                      Byte 2-3 = 0x0fff
12347  *              ii.     Byte 0 = 37 (inner vlan FV index).
12348  *                      Byte 1 = 0 (rsv)
12349  *                      Byte 2-3 = 0x0fff
12350  *
12351  * Step 2:
12352  * 2.   Create cloud filter using two L1 filters entries: stag and
12353  *              new filter(outer vlan+ inner vlan)
12354  *      a.      Valid_flags.replace_cloud = 1
12355  *      b.      Old_filter = 1 (instead of outer IP)
12356  *      c.      New_filter = 0x10
12357  *      d.      Buffer – 2 entries:
12358  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
12359  *                      Byte 1-3 = 0 (rsv)
12360  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
12361  *                      Byte 9-11 = 0 (rsv)
12362  */
12363 static int
12364 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
12365 {
12366         int ret = -ENOTSUP;
12367         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
12368         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
12369         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12370         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
12371
12372         if (pf->support_multi_driver) {
12373                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
12374                 return ret;
12375         }
12376
12377         /* Init */
12378         memset(&filter_replace, 0,
12379                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12380         memset(&filter_replace_buf, 0,
12381                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12382
12383         /* create L1 filter */
12384         filter_replace.old_filter_type =
12385                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
12386         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12387         filter_replace.tr_bit = 0;
12388
12389         /* Prepare the buffer, 2 entries */
12390         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
12391         filter_replace_buf.data[0] |=
12392                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12393         /* Field Vector 12b mask */
12394         filter_replace_buf.data[2] = 0xff;
12395         filter_replace_buf.data[3] = 0x0f;
12396         filter_replace_buf.data[4] =
12397                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
12398         filter_replace_buf.data[4] |=
12399                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12400         /* Field Vector 12b mask */
12401         filter_replace_buf.data[6] = 0xff;
12402         filter_replace_buf.data[7] = 0x0f;
12403         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12404                         &filter_replace_buf);
12405         if (ret != I40E_SUCCESS)
12406                 return ret;
12407
12408         if (filter_replace.old_filter_type !=
12409             filter_replace.new_filter_type)
12410                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
12411                             " original: 0x%x, new: 0x%x",
12412                             dev->device->name,
12413                             filter_replace.old_filter_type,
12414                             filter_replace.new_filter_type);
12415
12416         /* Apply the second L2 cloud filter */
12417         memset(&filter_replace, 0,
12418                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12419         memset(&filter_replace_buf, 0,
12420                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12421
12422         /* create L2 filter, input for L2 filter will be L1 filter  */
12423         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
12424         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
12425         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12426
12427         /* Prepare the buffer, 2 entries */
12428         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
12429         filter_replace_buf.data[0] |=
12430                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12431         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12432         filter_replace_buf.data[4] |=
12433                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12434         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12435                         &filter_replace_buf);
12436         if (!ret && (filter_replace.old_filter_type !=
12437                      filter_replace.new_filter_type))
12438                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
12439                             " original: 0x%x, new: 0x%x",
12440                             dev->device->name,
12441                             filter_replace.old_filter_type,
12442                             filter_replace.new_filter_type);
12443
12444         return ret;
12445 }
12446
12447 RTE_LOG_REGISTER(i40e_logtype_init, pmd.net.i40e.init, NOTICE);
12448 RTE_LOG_REGISTER(i40e_logtype_driver, pmd.net.i40e.driver, NOTICE);
12449 #ifdef RTE_ETHDEV_DEBUG_RX
12450 RTE_LOG_REGISTER(i40e_logtype_rx, pmd.net.i40e.rx, DEBUG);
12451 #endif
12452 #ifdef RTE_ETHDEV_DEBUG_TX
12453 RTE_LOG_REGISTER(i40e_logtype_tx, pmd.net.i40e.tx, DEBUG);
12454 #endif
12455
12456 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12457                               ETH_I40E_FLOATING_VEB_ARG "=1"
12458                               ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
12459                               ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12460                               ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
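
/*
 * Example devargs (illustrative, PCI address is a placeholder): enable
 * floating VEB and limit each VF to 4 queue pairs:
 *
 *     -a 0000:02:00.0,enable_floating_veb=1,queue-num-per-vf=4
 */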