ethdev: validate input in module EEPROM dump
[dpdk.git] drivers/net/i40e/i40e_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>
#include <rte_bitmap.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"
#include "i40e_hash.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"
#define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"
#define ETH_I40E_QUEUE_NUM_PER_VF_ARG   "queue-num-per-vf"
#define ETH_I40E_VF_MSG_CFG             "vf_msg_cfg"

#define I40E_CLEAR_PXE_WAIT_MS     200
#define I40E_VSI_TSR_QINQ_STRIP         0x4010
#define I40E_VSI_TSR(_i)        (0x00050800 + ((_i) * 4))

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)

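/*
 * Worked arithmetic for the two defaults above: 0xF2000 is 991232
 * bytes, and shifting right by I40E_KILOSHIFT (10) converts bytes to
 * kilobytes, so both watermarks default to 968 KB, i.e. the full
 * I40E_RXPBSIZE receive packet buffer.
 */
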
/* Receive average packet size in bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
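
/*
 * Note (derived from the constants above, not from the datasheet):
 * the INCVAL values scale inversely with link speed relative to 1G:
 * 0x2000000000 / 10 ~= 0x0333333333 (10G) and 0x2000000000 / 20 ~=
 * 0x0199999999 (40G), which suggests the per-cycle increment shrinks
 * as the MAC clock speeds up so SYSTIME advances at a constant
 * real-time rate.
 */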

/**
 * Below are values for writing un-exposed registers suggested
 * by silicon experts
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0000FF00UL
#define I40E_INSET_IPV4_TTL_MASK        0x000000FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x0000FF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0000F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x0000FF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000000FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)

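/*
 * Minimal sketch (illustrative, not the driver's actual helper) of how
 * the offsets above are meant to be used: read the PCIe capability
 * register, and if Extended Tag is supported, set the enable bit in
 * the control register. rte_pci_read_config()/rte_pci_write_config()
 * come from rte_bus_pci.h, already included above.
 */
static inline void
i40e_extended_tag_enable_sketch(struct rte_pci_device *pci_dev)
{
	uint32_t buf = 0;

	if (rte_pci_read_config(pci_dev, &buf, sizeof(buf),
				PCI_DEV_CAP_REG) < 0)
		return;
	if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK))
		return;	/* Extended Tag capability not present */

	buf = 0;
	if (rte_pci_read_config(pci_dev, &buf, sizeof(buf),
				PCI_DEV_CTRL_REG) < 0)
		return;
	buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
	(void)rte_pci_write_config(pci_dev, &buf, sizeof(buf),
				   PCI_DEV_CTRL_REG);
}
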
#define I40E_GLQF_PIT_IPV4_START        2
#define I40E_GLQF_PIT_IPV4_COUNT        2
#define I40E_GLQF_PIT_IPV6_START        4
#define I40E_GLQF_PIT_IPV6_COUNT        2

#define I40E_GLQF_PIT_SOURCE_OFF_GET(a) \
				(((a) & I40E_GLQF_PIT_SOURCE_OFF_MASK) >> \
				 I40E_GLQF_PIT_SOURCE_OFF_SHIFT)

#define I40E_GLQF_PIT_DEST_OFF_GET(a) \
				(((a) & I40E_GLQF_PIT_DEST_OFF_MASK) >> \
				 I40E_GLQF_PIT_DEST_OFF_SHIFT)

#define I40E_GLQF_PIT_FSIZE_GET(a)      (((a) & I40E_GLQF_PIT_FSIZE_MASK) >> \
					 I40E_GLQF_PIT_FSIZE_SHIFT)

#define I40E_GLQF_PIT_BUILD(off, mask)  (((off) << 16) | (mask))
#define I40E_FDIR_FIELD_OFFSET(a)       ((a) >> 1)

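/*
 * Worked example for the helpers above (values hypothetical):
 * I40E_GLQF_PIT_BUILD(0x2, 0x9420) packs the offset into bits 31:16
 * and the mask into bits 15:0, giving 0x00029420; the value 0x00009420
 * written to I40E_GLQF_PIT(9) in i40e_GLQF_reg_init() below is
 * equivalently I40E_GLQF_PIT_BUILD(0x0, 0x9420).
 * I40E_FDIR_FIELD_OFFSET() converts a byte offset into the 16-bit word
 * offset the hardware expects, e.g. byte 12 -> word 6.
 */
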
static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static int i40e_dev_stop(struct rte_eth_dev *dev);
static int i40e_dev_close(struct rte_eth_dev *dev);
static int i40e_dev_reset(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned limit);
static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
				char *fw_version, size_t fw_size);
static int i40e_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id,
				int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type,
			      uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue,
				      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr,
			    uint32_t index,
			    uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
			       uint32_t hireg,
			       uint32_t loreg,
			       bool offset_loaded,
			       uint64_t *offset,
			       uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static void i40e_dev_alarm_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
				uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
						struct i40e_vsi *vsi);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
				 const struct rte_flow_ops **ops);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
				  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
						     uint16_t seid,
						     uint16_t rule_type,
						     uint16_t *entries,
						     uint16_t count,
						     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp,
					   uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
				   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
				    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					  uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
			 struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *eeprom);

static int i40e_get_module_info(struct rte_eth_dev *dev,
				struct rte_eth_dev_module_info *modinfo);
static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
				  struct rte_dev_eeprom_info *info);

static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
	const struct rte_eth_ethertype_filter *input,
	struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
	struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
				struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

static const char *const valid_keys[] = {
	ETH_I40E_FLOATING_VEB_ARG,
	ETH_I40E_FLOATING_VEB_LIST_ARG,
	ETH_I40E_SUPPORT_MULTI_DRIVER,
	ETH_I40E_QUEUE_NUM_PER_VF_ARG,
	ETH_I40E_VF_MSG_CFG,
	NULL};

static const struct rte_pci_id pci_id_i40e_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.dev_reset                    = i40e_dev_reset,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.xstats_get                   = i40e_dev_xstats_get,
	.xstats_get_names             = i40e_dev_xstats_get_names,
	.stats_reset                  = i40e_dev_stats_reset,
	.xstats_reset                 = i40e_dev_stats_reset,
	.fw_version_get               = i40e_fw_version_get,
	.dev_infos_get                = i40e_dev_info_get,
	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_get                = i40e_flow_ctrl_get,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
	.flow_ops_get                 = i40e_dev_flow_ops_get,
	.rxq_info_get                 = i40e_rxq_info_get,
	.txq_info_get                 = i40e_txq_info_get,
	.rx_burst_mode_get            = i40e_rx_burst_mode_get,
	.tx_burst_mode_get            = i40e_tx_burst_mode_get,
	.mirror_rule_set              = i40e_mirror_rule_set,
	.mirror_rule_reset            = i40e_mirror_rule_reset,
	.timesync_enable              = i40e_timesync_enable,
	.timesync_disable             = i40e_timesync_disable,
	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
	.get_dcb_info                 = i40e_dev_get_dcb_info,
	.timesync_adjust_time         = i40e_timesync_adjust_time,
	.timesync_read_time           = i40e_timesync_read_time,
	.timesync_write_time          = i40e_timesync_write_time,
	.get_reg                      = i40e_get_regs,
	.get_eeprom_length            = i40e_get_eeprom_length,
	.get_eeprom                   = i40e_get_eeprom,
	.get_module_info              = i40e_get_module_info,
	.get_module_eeprom            = i40e_get_module_eeprom,
	.mac_addr_set                 = i40e_set_default_mac_addr,
	.mtu_set                      = i40e_dev_mtu_set,
	.tm_ops_get                   = i40e_tm_ops_get,
	.tx_done_cleanup              = i40e_tx_done_cleanup,
	.get_monitor_addr             = i40e_get_monitor_addr,
};

/* store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
		sizeof(rte_i40e_stats_strings[0]))

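/*
 * Illustrative sketch (not part of the upstream driver): how the
 * name/offset tables above and below are consumed when xstats are
 * filled in. Each value is fetched by adding the recorded offsetof()
 * result to the base address of the stats structure.
 */
static inline uint64_t
i40e_xstat_read_sketch(const struct i40e_eth_stats *stats,
		       const struct rte_i40e_xstats_name_off *entry)
{
	/* reinterpret (base + offset) as the 64-bit counter it names */
	return *(const uint64_t *)(((const char *)stats) + entry->offset);
}
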
static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
		mac_remote_faults)},
	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
		rx_length_errors)},
	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
		mac_short_packet_dropped)},
	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_big)},
	{"rx_flow_director_atr_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
	{"rx_flow_director_sb_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		tx_lpi_status)},
	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		rx_lpi_status)},
	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		tx_lpi_count)},
	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
		sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_rx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
		sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_tx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_tx)},
	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
		sizeof(rte_i40e_txq_prio_strings[0]))

static int
eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	int i, retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
				&eth_da);
		if (retval)
			return retval;
	}

	if (eth_da.nb_representor_ports > 0 &&
	    eth_da.type != RTE_ETH_REPRESENTOR_VF) {
		PMD_DRV_LOG(ERR, "unsupported representor type: %s\n",
			    pci_dev->device.devargs->args);
		return -ENOTSUP;
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
		sizeof(struct i40e_adapter),
		eth_dev_pci_specific_init, pci_dev,
		eth_i40e_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	/* probe VF representor ports */
	struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
		pci_dev->device.name);

	if (pf_ethdev == NULL)
		return -ENODEV;

	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		struct i40e_vf_representor representor = {
			.vf_id = eth_da.representor_ports[i],
			.switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
				pf_ethdev->data->dev_private)->switch_domain_id,
			.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
				pf_ethdev->data->dev_private)
		};

		/* representor port name: net_<bdf>_representor_<id> */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			pci_dev->device.name, eth_da.representor_ports[i]);

		retval = rte_eth_dev_create(&pci_dev->device, name,
			sizeof(struct i40e_vf_representor), NULL, NULL,
			i40e_vf_representor_init, &representor);

		if (retval)
			PMD_DRV_LOG(ERR, "failed to create i40e vf "
				"representor %s.", name);
	}

	return 0;
}

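/*
 * Usage example (hypothetical PCI address): the representor parsing in
 * eth_i40e_pci_probe() above is driven by EAL devargs such as
 *
 *   dpdk-testpmd -a 0000:02:00.0,representor=[0-2] -- -i
 *
 * which, given the name format used there, creates ports
 * net_0000:02:00.0_representor_0 through _2 alongside the PF port.
 */
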
static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return 0;

	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
		return rte_eth_dev_pci_generic_remove(pci_dev,
					i40e_vf_representor_uninit);
	else
		return rte_eth_dev_pci_generic_remove(pci_dev,
						eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
	.id_table = pci_id_i40e_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_i40e_pci_probe,
	.remove = eth_i40e_pci_remove,
};

static inline void
i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
			 uint32_t reg_val)
{
	uint32_t ori_reg_val;
	struct rte_eth_dev *dev;

	ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
	dev = ((struct i40e_adapter *)hw->back)->eth_dev;
	i40e_write_rx_ctl(hw, reg_addr, reg_val);
	if (ori_reg_val != reg_val)
		PMD_DRV_LOG(WARNING,
			    "i40e device %s changed global register [0x%08x]."
			    " original: 0x%08x, new: 0x%08x",
			    dev->device->name, reg_addr, ori_reg_val, reg_val);
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
	 * Initialize registers for parsing packet type of QinQ
	 * This should be removed from code once proper
	 * configuration API is added to avoid configuration conflicts
	 * between ports of the same device.
	 */
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}

static inline void i40e_config_automask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	/* INTENA flag is not auto-cleared for interrupt */
	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;

	/* When multi-driver support is enabled, the PF uses INT0. */
	if (!pf->support_multi_driver)
		val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

static inline void i40e_clear_automask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val &= ~(I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
		 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK);

	if (!pf->support_multi_driver)
		val &= ~I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
	int ret;

	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
				I40E_FLOW_CONTROL_ETHERTYPE, flags,
				pf->main_vsi_seid, 0,
				TRUE, NULL, NULL);
	if (ret)
		PMD_INIT_LOG(ERR,
			"Failed to add filter to drop flow control frames from VSIs.");
}

static int
floating_veb_list_handler(__rte_unused const char *key,
			  const char *floating_veb_value,
			  void *opaque)
{
	int idx = 0;
	unsigned int count = 0;
	char *end = NULL;
	int min, max;
	bool *vf_floating_veb = opaque;

	while (isblank(*floating_veb_value))
		floating_veb_value++;

	/* Reset floating VEB configuration for VFs */
	for (idx = 0; idx < I40E_MAX_VF; idx++)
		vf_floating_veb[idx] = false;

	min = I40E_MAX_VF;
	do {
		while (isblank(*floating_veb_value))
			floating_veb_value++;
		if (*floating_veb_value == '\0')
			return -1;
		errno = 0;
		idx = strtoul(floating_veb_value, &end, 10);
		if (errno || end == NULL)
			return -1;
		while (isblank(*end))
			end++;
		if (*end == '-') {
			min = idx;
		} else if ((*end == ';') || (*end == '\0')) {
			max = idx;
			if (min == I40E_MAX_VF)
				min = idx;
			if (max >= I40E_MAX_VF)
				max = I40E_MAX_VF - 1;
			for (idx = min; idx <= max; idx++) {
				vf_floating_veb[idx] = true;
				count++;
			}
			min = I40E_MAX_VF;
		} else {
			return -1;
		}
		floating_veb_value = end + 1;
	} while (*end != '\0');

	if (count == 0)
		return -1;

	return 0;
}

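/*
 * Usage example (hypothetical PCI address and VF ids): the handler
 * above accepts ';'-separated entries and '-' ranges, so
 *
 *   -a 0000:02:00.0,enable_floating_veb=1,floating_veb_list=0;3-5
 *
 * marks VFs 0, 3, 4 and 5 as attached to the floating VEB. Note that
 * the ';' usually needs quoting in a shell.
 */
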
static void
config_vf_floating_veb(struct rte_devargs *devargs,
		       uint16_t floating_veb,
		       bool *vf_floating_veb)
{
	struct rte_kvargs *kvlist;
	int i;
	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

	if (!floating_veb)
		return;
	/* All the VFs attach to the floating VEB by default
	 * when the floating VEB is enabled.
	 */
	for (i = 0; i < I40E_MAX_VF; i++)
		vf_floating_veb[i] = true;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
	if (kvlist == NULL)
		return;

	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
		rte_kvargs_free(kvlist);
		return;
	}
	/* When the floating_veb_list parameter exists, all the VFs
	 * will first attach to the legacy VEB and are then moved to
	 * the floating VEB according to the floating_veb_list.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_list,
			       floating_veb_list_handler,
			       vf_floating_veb) < 0) {
		rte_kvargs_free(kvlist);
		return;
	}
	rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
			    const char *value,
			    __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Floating VEB is enabled when there's key-value:
	 * enable_floating_veb=1
	 */
	if (rte_kvargs_process(kvlist, floating_veb_key,
			       i40e_check_floating_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
		pf->floating_veb =
			is_floating_veb_supported(pci_dev->device.devargs);
		config_vf_floating_veb(pci_dev->device.devargs,
				       pf->floating_veb,
				       pf->floating_veb_list);
	} else {
		pf->floating_veb = false;
	}
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	char ethertype_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters ethertype_hash_params = {
		.name = ethertype_hash_name,
		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
		.key_len = sizeof(struct i40e_ethertype_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize ethertype filter rule list and hash */
	TAILQ_INIT(&ethertype_rule->ethertype_list);
	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
		 "ethertype_%s", dev->device->name);
	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
	if (!ethertype_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
		return -EINVAL;
	}
	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
				       sizeof(struct i40e_ethertype_filter *) *
				       I40E_MAX_ETHERTYPE_FILTER_NUM,
				       0);
	if (!ethertype_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for ethertype hash map!");
		ret = -ENOMEM;
		goto err_ethertype_hash_map_alloc;
	}

	return 0;

err_ethertype_hash_map_alloc:
	rte_hash_free(ethertype_rule->hash_table);

	return ret;
}

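/*
 * Minimal sketch (illustrative; the real insert helper is
 * i40e_sw_ethertype_filter_insert() declared above) of how the table
 * built here is used: the filter input is added as a hash key and the
 * filter pointer is stored at the key position rte_hash returns, so
 * later lookups are O(1).
 */
static inline int
i40e_ethertype_rule_insert_sketch(struct i40e_ethertype_rule *rule,
				  struct i40e_ethertype_filter *filter)
{
	int pos = rte_hash_add_key(rule->hash_table, &filter->input);

	if (pos < 0)
		return pos;	/* -EINVAL or -ENOSPC from rte_hash */
	rule->hash_map[pos] = filter;
	return 0;
}
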
static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	char tunnel_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters tunnel_hash_params = {
		.name = tunnel_hash_name,
		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
		.key_len = sizeof(struct i40e_tunnel_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize tunnel filter rule list and hash */
	TAILQ_INIT(&tunnel_rule->tunnel_list);
	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
		 "tunnel_%s", dev->device->name);
	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
	if (!tunnel_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
		return -EINVAL;
	}
	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
				    sizeof(struct i40e_tunnel_filter *) *
				    I40E_MAX_TUNNEL_FILTER_NUM,
				    0);
	if (!tunnel_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for tunnel hash map!");
		ret = -ENOMEM;
		goto err_tunnel_hash_map_alloc;
	}

	return 0;

err_tunnel_hash_map_alloc:
	rte_hash_free(tunnel_rule->hash_table);

	return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
	uint32_t best = hw->func_caps.fd_filters_best_effort;
	struct rte_bitmap *bmp = NULL;
	uint32_t bmp_size;
	void *mem = NULL;
	uint32_t i = 0;
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = I40E_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct i40e_fdir_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize flow director filter rule list and hash */
	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}

	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
					  sizeof(struct i40e_fdir_filter *) *
					  I40E_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}

	fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
			sizeof(struct i40e_fdir_filter) *
			I40E_MAX_FDIR_FILTER_NUM,
			0);

	if (!fdir_info->fdir_filter_array) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir filter array!");
		ret = -ENOMEM;
		goto err_fdir_filter_array_alloc;
	}

	fdir_info->fdir_space_size = alloc + best;
	fdir_info->fdir_actual_cnt = 0;
	fdir_info->fdir_guarantee_total_space = alloc;
	fdir_info->fdir_guarantee_free_space =
		fdir_info->fdir_guarantee_total_space;

	PMD_DRV_LOG(INFO, "FDIR guarantee space: %u, best_effort space %u.",
		    alloc, best);

	fdir_info->fdir_flow_pool.pool =
			rte_zmalloc("i40e_fdir_entry",
				sizeof(struct i40e_fdir_entry) *
				fdir_info->fdir_space_size,
				0);

	if (!fdir_info->fdir_flow_pool.pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir flow pool!");
		ret = -ENOMEM;
		goto err_fdir_bitmap_flow_alloc;
	}

	for (i = 0; i < fdir_info->fdir_space_size; i++)
		fdir_info->fdir_flow_pool.pool[i].idx = i;

	bmp_size =
		rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
	mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
	if (mem == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir bitmap!");
		ret = -ENOMEM;
		goto err_fdir_mem_alloc;
	}
	bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
	if (bmp == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to initialize fdir bitmap!");
		ret = -ENOMEM;
		goto err_fdir_bmp_alloc;
	}
	for (i = 0; i < fdir_info->fdir_space_size; i++)
		rte_bitmap_set(bmp, i);

	fdir_info->fdir_flow_pool.bitmap = bmp;

	return 0;

err_fdir_bmp_alloc:
	rte_free(mem);
err_fdir_mem_alloc:
	rte_free(fdir_info->fdir_flow_pool.pool);
err_fdir_bitmap_flow_alloc:
	rte_free(fdir_info->fdir_filter_array);
err_fdir_filter_array_alloc:
	rte_free(fdir_info->hash_map);
err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

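/*
 * Minimal sketch (illustrative) of how the flow pool built above hands
 * out entries: scan the bitmap for a set bit, clear it to mark the
 * entry busy, and return the pre-indexed entry; freeing an entry sets
 * its bit again. rte_bitmap_scan() and rte_bsf64() come from the
 * headers included at the top of this file.
 */
static inline struct i40e_fdir_entry *
i40e_fdir_entry_alloc_sketch(struct i40e_fdir_info *fdir_info)
{
	uint32_t pos = 0;
	uint64_t slab = 0;

	/* rte_bitmap_scan() returns 1 when a non-empty slab is found */
	if (rte_bitmap_scan(fdir_info->fdir_flow_pool.bitmap,
			    &pos, &slab) == 0)
		return NULL;	/* pool exhausted */

	pos += rte_bsf64(slab);	/* first free entry within the slab */
	rte_bitmap_clear(fdir_info->fdir_flow_pool.bitmap, pos);
	return &fdir_info->fdir_flow_pool.pool[pos];
}
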
1196 static void
1197 i40e_init_customized_info(struct i40e_pf *pf)
1198 {
1199         int i;
1200
1201         /* Initialize customized pctype */
1202         for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
1203                 pf->customized_pctype[i].index = i;
1204                 pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
1205                 pf->customized_pctype[i].valid = false;
1206         }
1207
1208         pf->gtp_support = false;
1209         pf->esp_support = false;
1210 }
1211
1212 static void
1213 i40e_init_filter_invalidation(struct i40e_pf *pf)
1214 {
1215         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1216         struct i40e_fdir_info *fdir_info = &pf->fdir;
1217         uint32_t glqf_ctl_reg = 0;
1218
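        /* INVALPRIO selects which flow director space is invalidated first:
         * guaranteed space or shared (best-effort) space. With a single
         * driver the global register can simply be programmed; with multiple
         * drivers the value already in the register must be respected. */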
1219         glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
1220         if (!pf->support_multi_driver) {
1221                 fdir_info->fdir_invalprio = 1;
1222                 glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
1223                 PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
1224                 i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
1225         } else {
1226                 if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
1227                         fdir_info->fdir_invalprio = 1;
1228                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
1229                 } else {
1230                         fdir_info->fdir_invalprio = 0;
1231                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
1232                 }
1233         }
1234 }
1235
1236 void
1237 i40e_init_queue_region_conf(struct rte_eth_dev *dev)
1238 {
1239         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1240         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1241         struct i40e_queue_regions *info = &pf->queue_region;
1242         uint16_t i;
1243
1244         for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
1245                 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
1246
1247         memset(info, 0, sizeof(struct i40e_queue_regions));
1248 }
1249
1250 static int
1251 i40e_parse_multi_drv_handler(__rte_unused const char *key,
1252                                const char *value,
1253                                void *opaque)
1254 {
1255         struct i40e_pf *pf;
1256         unsigned long support_multi_driver;
1257         char *end;
1258
1259         pf = (struct i40e_pf *)opaque;
1260
1261         errno = 0;
1262         support_multi_driver = strtoul(value, &end, 10);
1263         if (errno != 0 || end == value || *end != 0) {
1264                 PMD_DRV_LOG(WARNING, "Wrong global configuration");
1265                 return -(EINVAL);
1266         }
1267
1268         if (support_multi_driver == 1 || support_multi_driver == 0)
1269                 pf->support_multi_driver = (bool)support_multi_driver;
1270         else
1271                 PMD_DRV_LOG(WARNING, "%s must be 1 or 0, "
1272                             "enable global configuration by default.",
1273                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1274         return 0;
1275 }
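/* A typical devargs use of the handler above (the PCI address is only an
 * illustrative example):
 *   dpdk-testpmd -a 0000:02:00.0,support-multi-driver=1 -- -i
 */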
1276
1277 static int
1278 i40e_support_multi_driver(struct rte_eth_dev *dev)
1279 {
1280         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1281         struct rte_kvargs *kvlist;
1282         int kvargs_count;
1283
1284         /* Enable global configuration by default */
1285         pf->support_multi_driver = false;
1286
1287         if (!dev->device->devargs)
1288                 return 0;
1289
1290         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1291         if (!kvlist)
1292                 return -EINVAL;
1293
1294         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1295         if (!kvargs_count) {
1296                 rte_kvargs_free(kvlist);
1297                 return 0;
1298         }
1299
1300         if (kvargs_count > 1)
1301                 PMD_DRV_LOG(WARNING, "More than one \"%s\" argument given; only "
1302                             "the first invalid or the last valid one is used!",
1303                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1304
1305         if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1306                                i40e_parse_multi_drv_handler, pf) < 0) {
1307                 rte_kvargs_free(kvlist);
1308                 return -EINVAL;
1309         }
1310
1311         rte_kvargs_free(kvlist);
1312         return 0;
1313 }
1314
1315 static int
1316 i40e_aq_debug_write_global_register(struct i40e_hw *hw,
1317                                     uint32_t reg_addr, uint64_t reg_val,
1318                                     struct i40e_asq_cmd_details *cmd_details)
1319 {
1320         uint64_t ori_reg_val;
1321         struct rte_eth_dev *dev;
1322         int ret;
1323
1324         ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
1325         if (ret != I40E_SUCCESS) {
1326                 PMD_DRV_LOG(ERR,
1327                             "Failed to debug read from 0x%08x",
1328                             reg_addr);
1329                 return -EIO;
1330         }
1331         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
1332
1333         if (ori_reg_val != reg_val)
1334                 PMD_DRV_LOG(WARNING,
1335                             "i40e device %s changed global register [0x%08x]."
1336                             " original: 0x%"PRIx64", after: 0x%"PRIx64,
1337                             dev->device->name, reg_addr, ori_reg_val, reg_val);
1338
1339         return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
1340 }
1341
1342 static int
1343 read_vf_msg_config(__rte_unused const char *key,
1344                                const char *value,
1345                                void *opaque)
1346 {
1347         struct i40e_vf_msg_cfg *cfg = opaque;
1348
1349         if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
1350                         &cfg->ignore_second) != 3) {
1351                 memset(cfg, 0, sizeof(*cfg));
1352                 PMD_DRV_LOG(ERR, "Format error! Example: "
1353                                 "%s=60@120:180", ETH_I40E_VF_MSG_CFG);
1354                 return -EINVAL;
1355         }
1356
1357         /*
1358          * If the message validation function is enabled, both 'period'
1359          * and 'ignore_second' must be greater than 0.
1360          */
1361         if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
1362                 memset(cfg, 0, sizeof(*cfg));
1363                 PMD_DRV_LOG(ERR, "%s error! The second and third"
1364                                 " numbers must be greater than 0!",
1365                                 ETH_I40E_VF_MSG_CFG);
1366                 return -EINVAL;
1367         }
1368
1369         return 0;
1370 }
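/* Per the max_msg@period:ignore_second format parsed above, a devargs value
 * such as vf_msg_cfg=60@120:180 means: if a VF sends more than 60 messages
 * within a 120 second period, ignore that VF's messages for 180 seconds. */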
1371
1372 static int
1373 i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
1374                 struct i40e_vf_msg_cfg *msg_cfg)
1375 {
1376         struct rte_kvargs *kvlist;
1377         int kvargs_count;
1378         int ret = 0;
1379
1380         memset(msg_cfg, 0, sizeof(*msg_cfg));
1381
1382         if (!dev->device->devargs)
1383                 return ret;
1384
1385         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1386         if (!kvlist)
1387                 return -EINVAL;
1388
1389         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
1390         if (!kvargs_count)
1391                 goto free_end;
1392
1393         if (kvargs_count > 1) {
1394                 PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
1395                                 ETH_I40E_VF_MSG_CFG);
1396                 ret = -EINVAL;
1397                 goto free_end;
1398         }
1399
1400         if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
1401                         read_vf_msg_config, msg_cfg) < 0)
1402                 ret = -EINVAL;
1403
1404 free_end:
1405         rte_kvargs_free(kvlist);
1406         return ret;
1407 }
1408
1409 #define I40E_ALARM_INTERVAL 50000 /* us */
1410
1411 static int
1412 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1413 {
1414         struct rte_pci_device *pci_dev;
1415         struct rte_intr_handle *intr_handle;
1416         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1417         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1418         struct i40e_vsi *vsi;
1419         int ret;
1420         uint32_t len, val;
1421         uint8_t aq_fail = 0;
1422
1423         PMD_INIT_FUNC_TRACE();
1424
1425         dev->dev_ops = &i40e_eth_dev_ops;
1426         dev->rx_queue_count = i40e_dev_rx_queue_count;
1427         dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
1428         dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
1429         dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
1430         dev->rx_pkt_burst = i40e_recv_pkts;
1431         dev->tx_pkt_burst = i40e_xmit_pkts;
1432         dev->tx_pkt_prepare = i40e_prep_pkts;
1433
1434         /* For secondary processes, we don't initialise any further, as the
1435          * primary has already done this work. Only check whether we need a
1436          * different Rx function */
1437         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1438                 i40e_set_rx_function(dev);
1439                 i40e_set_tx_function(dev);
1440                 return 0;
1441         }
1442         i40e_set_default_ptype_table(dev);
1443         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1444         intr_handle = &pci_dev->intr_handle;
1445
1446         rte_eth_copy_pci_info(dev, pci_dev);
1447         dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1448
1449         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1450         pf->adapter->eth_dev = dev;
1451         pf->dev_data = dev->data;
1452
1453         hw->back = I40E_PF_TO_ADAPTER(pf);
1454         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1455         if (!hw->hw_addr) {
1456                 PMD_INIT_LOG(ERR,
1457                         "Hardware is not available, as address is NULL");
1458                 return -ENODEV;
1459         }
1460
1461         hw->vendor_id = pci_dev->id.vendor_id;
1462         hw->device_id = pci_dev->id.device_id;
1463         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1464         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1465         hw->bus.device = pci_dev->addr.devid;
1466         hw->bus.func = pci_dev->addr.function;
1467         hw->adapter_stopped = 0;
1468         hw->adapter_closed = 0;
1469
1470         /* Init switch device pointer */
1471         hw->switch_dev = NULL;
1472
1473         /*
1474          * Switch Tag value should not be identical to either the First Tag
1475          * or Second Tag values. So set something other than common Ethertype
1476          * for internal switching.
1477          */
1478         hw->switch_tag = 0xffff;
1479
1480         val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1481         if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1482                 PMD_INIT_LOG(ERR, "\nERROR: "
1483                         "Firmware recovery mode detected. Limiting functionality.\n"
1484                         "Refer to the Intel(R) Ethernet Adapters and Devices "
1485                         "User Guide for details on firmware recovery mode.");
1486                 return -EIO;
1487         }
1488
1489         i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
1490         /* Check whether multi-driver support is needed */
1491         i40e_support_multi_driver(dev);
1492
1493         /* Make sure all is clean before doing PF reset */
1494         i40e_clear_hw(hw);
1495
1496         /* Reset here to make sure all is clean for each PF */
1497         ret = i40e_pf_reset(hw);
1498         if (ret) {
1499                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1500                 return ret;
1501         }
1502
1503         /* Initialize the shared code (base driver) */
1504         ret = i40e_init_shared_code(hw);
1505         if (ret) {
1506                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1507                 return ret;
1508         }
1509
1510         /* Initialize the parameters for adminq */
1511         i40e_init_adminq_parameter(hw);
1512         ret = i40e_init_adminq(hw);
1513         if (ret != I40E_SUCCESS) {
1514                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1515                 return -EIO;
1516         }
1517         /* Firmware of SFP X722 does not support 802.1ad frames */
1518         if (hw->device_id == I40E_DEV_ID_SFP_X722 ||
1519                 hw->device_id == I40E_DEV_ID_SFP_I_X722)
1520                 hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
1521
1522         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1523                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1524                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1525                      ((hw->nvm.version >> 12) & 0xf),
1526                      ((hw->nvm.version >> 4) & 0xff),
1527                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1528
1529         /* Initialize the hardware */
1530         i40e_hw_init(dev);
1531
1532         i40e_config_automask(pf);
1533
1534         i40e_set_default_pctype_table(dev);
1535
1536         /*
1537          * To work around the NVM issue, initialize the registers
1538          * for the QinQ packet type in software.
1539          * This should be removed once the issue is fixed in the NVM.
1540          */
1541         if (!pf->support_multi_driver)
1542                 i40e_GLQF_reg_init(hw);
1543
1544         /* Initialize the input set for filters (hash and fd) to default value */
1545         i40e_filter_input_set_init(pf);
1546
1547         /* initialise the L3_MAP register */
1548         if (!pf->support_multi_driver) {
1549                 ret = i40e_aq_debug_write_global_register(hw,
1550                                                    I40E_GLQF_L3_MAP(40),
1551                                                    0x00000028,  NULL);
1552                 if (ret)
1553                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1554                                      ret);
1555                 PMD_INIT_LOG(DEBUG,
1556                              "Global register 0x%08x is set to 0x28",
1557                              I40E_GLQF_L3_MAP(40));
1558         }
1559
1560         /* Need the special FW version to support floating VEB */
1561         config_floating_veb(dev);
1562         /* Clear PXE mode */
1563         i40e_clear_pxe_mode(hw);
1564         i40e_dev_sync_phy_type(hw);
1565
1566         /*
1567          * On X710, performance is far below expectation on recent firmware
1568          * versions, and the fix may not be integrated in the next firmware
1569          * release, so a workaround in the software driver is needed. It
1570          * modifies the initial values of 3 internal-only registers. Note
1571          * that the workaround can be removed once the issue is fixed in
1572          * firmware.
1573          */
1574         i40e_configure_registers(hw);
1575
1576         /* Get hw capabilities */
1577         ret = i40e_get_cap(hw);
1578         if (ret != I40E_SUCCESS) {
1579                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1580                 goto err_get_capabilities;
1581         }
1582
1583         /* Initialize parameters for PF */
1584         ret = i40e_pf_parameter_init(dev);
1585         if (ret != 0) {
1586                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1587                 goto err_parameter_init;
1588         }
1589
1590         /* Initialize the queue management */
1591         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1592         if (ret < 0) {
1593                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1594                 goto err_qp_pool_init;
1595         }
1596         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1597                                 hw->func_caps.num_msix_vectors - 1);
1598         if (ret < 0) {
1599                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1600                 goto err_msix_pool_init;
1601         }
1602
1603         /* Initialize lan hmc */
1604         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1605                                 hw->func_caps.num_rx_qp, 0, 0);
1606         if (ret != I40E_SUCCESS) {
1607                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1608                 goto err_init_lan_hmc;
1609         }
1610
1611         /* Configure lan hmc */
1612         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1613         if (ret != I40E_SUCCESS) {
1614                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1615                 goto err_configure_lan_hmc;
1616         }
1617
1618         /* Get and check the mac address */
1619         i40e_get_mac_addr(hw, hw->mac.addr);
1620         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1621                 PMD_INIT_LOG(ERR, "MAC address is not valid");
1622                 ret = -EIO;
1623                 goto err_get_mac_addr;
1624         }
1625         /* Copy the permanent MAC address */
1626         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1627                         (struct rte_ether_addr *)hw->mac.perm_addr);
1628
1629         /* Disable flow control */
1630         hw->fc.requested_mode = I40E_FC_NONE;
1631         i40e_set_fc(hw, &aq_fail, TRUE);
1632
1633         /* Set the global registers with default ether type value */
1634         if (!pf->support_multi_driver) {
1635                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1636                                          RTE_ETHER_TYPE_VLAN);
1637                 if (ret != I40E_SUCCESS) {
1638                         PMD_INIT_LOG(ERR,
1639                                      "Failed to set the default outer "
1640                                      "VLAN ether type");
1641                         goto err_setup_pf_switch;
1642                 }
1643         }
1644
1645         /* PF setup, which includes VSI setup */
1646         ret = i40e_pf_setup(pf);
1647         if (ret) {
1648                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1649                 goto err_setup_pf_switch;
1650         }
1651
1652         vsi = pf->main_vsi;
1653
1654         /* Disable double vlan by default */
1655         i40e_vsi_config_double_vlan(vsi, FALSE);
1656
1657         /* Disable S-TAG identification when floating_veb is disabled */
1658         if (!pf->floating_veb) {
1659                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1660                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1661                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1662                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1663                 }
1664         }
1665
1666         if (!vsi->max_macaddrs)
1667                 len = RTE_ETHER_ADDR_LEN;
1668         else
1669                 len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1670
1671         /* Must be done after the VSI is initialized */
1672         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1673         if (!dev->data->mac_addrs) {
1674                 PMD_INIT_LOG(ERR,
1675                         "Failed to allocate memory for storing MAC address");
1676                 goto err_mac_alloc;
1677         }
1678         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1679                                         &dev->data->mac_addrs[0]);
1680
1681         /* Init dcb to sw mode by default */
1682         ret = i40e_dcb_init_configure(dev, TRUE);
1683         if (ret != I40E_SUCCESS) {
1684                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1685                 pf->flags &= ~I40E_FLAG_DCB;
1686         }
1687         /* Update HW struct after DCB configuration */
1688         i40e_get_cap(hw);
1689
1690         /* initialize pf host driver to setup SRIOV resource if applicable */
1691         i40e_pf_host_init(dev);
1692
1693         /* register callback func to eal lib */
1694         rte_intr_callback_register(intr_handle,
1695                                    i40e_dev_interrupt_handler, dev);
1696
1697         /* configure and enable device interrupt */
1698         i40e_pf_config_irq0(hw, TRUE);
1699         i40e_pf_enable_irq0(hw);
1700
1701         /* enable uio intr after callback register */
1702         rte_intr_enable(intr_handle);
1703
1704         /* By default disable flexible payload in global configuration */
1705         if (!pf->support_multi_driver)
1706                 i40e_flex_payload_reg_set_default(hw);
1707
1708         /*
1709          * Add an ethertype filter to drop all flow control frames transmitted
1710          * from VSIs. By doing so, we stop VFs from sending out PAUSE or PFC
1711          * frames to the wire.
1712          */
1713         i40e_add_tx_flow_control_drop_filter(pf);
1714
1715         /* Set the max frame size to 0x2600 by default,
1716          * in case other drivers changed the default value.
1717          */
1718         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
1719
1720         /* initialize mirror rule list */
1721         TAILQ_INIT(&pf->mirror_list);
1722
1723         /* initialize RSS rule list */
1724         TAILQ_INIT(&pf->rss_config_list);
1725
1726         /* initialize Traffic Manager configuration */
1727         i40e_tm_conf_init(dev);
1728
1729         /* Initialize customized information */
1730         i40e_init_customized_info(pf);
1731
1732         /* Initialize the filter invalidation configuration */
1733         i40e_init_filter_invalidation(pf);
1734
1735         ret = i40e_init_ethtype_filter_list(dev);
1736         if (ret < 0)
1737                 goto err_init_ethtype_filter_list;
1738         ret = i40e_init_tunnel_filter_list(dev);
1739         if (ret < 0)
1740                 goto err_init_tunnel_filter_list;
1741         ret = i40e_init_fdir_filter_list(dev);
1742         if (ret < 0)
1743                 goto err_init_fdir_filter_list;
1744
1745         /* initialize queue region configuration */
1746         i40e_init_queue_region_conf(dev);
1747
1748         /* reset all stats of the device, including pf and main vsi */
1749         i40e_dev_stats_reset(dev);
1750
1751         return 0;
1752
1753 err_init_fdir_filter_list:
1754         rte_free(pf->tunnel.hash_table);
1755         rte_free(pf->tunnel.hash_map);
1756 err_init_tunnel_filter_list:
1757         rte_free(pf->ethertype.hash_table);
1758         rte_free(pf->ethertype.hash_map);
1759 err_init_ethtype_filter_list:
1760         rte_free(dev->data->mac_addrs);
1761         dev->data->mac_addrs = NULL;
1762 err_mac_alloc:
1763         i40e_vsi_release(pf->main_vsi);
1764 err_setup_pf_switch:
1765 err_get_mac_addr:
1766 err_configure_lan_hmc:
1767         (void)i40e_shutdown_lan_hmc(hw);
1768 err_init_lan_hmc:
1769         i40e_res_pool_destroy(&pf->msix_pool);
1770 err_msix_pool_init:
1771         i40e_res_pool_destroy(&pf->qp_pool);
1772 err_qp_pool_init:
1773 err_parameter_init:
1774 err_get_capabilities:
1775         (void)i40e_shutdown_adminq(hw);
1776
1777         return ret;
1778 }
1779
1780 static void
1781 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1782 {
1783         struct i40e_ethertype_filter *p_ethertype;
1784         struct i40e_ethertype_rule *ethertype_rule;
1785
1786         ethertype_rule = &pf->ethertype;
1787         /* Remove all ethertype filter rules and hash */
1788         if (ethertype_rule->hash_map)
1789                 rte_free(ethertype_rule->hash_map);
1790         if (ethertype_rule->hash_table)
1791                 rte_hash_free(ethertype_rule->hash_table);
1792
1793         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1794                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1795                              p_ethertype, rules);
1796                 rte_free(p_ethertype);
1797         }
1798 }
1799
1800 static void
1801 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1802 {
1803         struct i40e_tunnel_filter *p_tunnel;
1804         struct i40e_tunnel_rule *tunnel_rule;
1805
1806         tunnel_rule = &pf->tunnel;
1807         /* Remove all tunnel director rules and hash */
1808         if (tunnel_rule->hash_map)
1809                 rte_free(tunnel_rule->hash_map);
1810         if (tunnel_rule->hash_table)
1811                 rte_hash_free(tunnel_rule->hash_table);
1812
1813         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1814                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1815                 rte_free(p_tunnel);
1816         }
1817 }
1818
1819 static void
1820 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1821 {
1822         struct i40e_fdir_filter *p_fdir;
1823         struct i40e_fdir_info *fdir_info;
1824
1825         fdir_info = &pf->fdir;
1826
1827         /* Remove all flow director rules */
1828         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
1829                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1830 }
1831
1832 static void
1833 i40e_fdir_memory_cleanup(struct i40e_pf *pf)
1834 {
1835         struct i40e_fdir_info *fdir_info;
1836
1837         fdir_info = &pf->fdir;
1838
1839         /* flow director memory cleanup */
1840         if (fdir_info->hash_map)
1841                 rte_free(fdir_info->hash_map);
1842         if (fdir_info->hash_table)
1843                 rte_hash_free(fdir_info->hash_table);
1844         if (fdir_info->fdir_flow_pool.bitmap)
1845                 rte_free(fdir_info->fdir_flow_pool.bitmap);
1846         if (fdir_info->fdir_flow_pool.pool)
1847                 rte_free(fdir_info->fdir_flow_pool.pool);
1848         if (fdir_info->fdir_filter_array)
1849                 rte_free(fdir_info->fdir_filter_array);
1850 }
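/* Note: rte_free() and rte_hash_free() both accept NULL, so the guards in
 * the cleanup above are defensive rather than strictly required. */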
1851
1852 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1853 {
1854         /*
1855          * Disable flexible payload by default
1856          * for the corresponding L2/L3/L4 layers.
1857          */
1858         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1859         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1860         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1861 }
1862
1863 static int
1864 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1865 {
1866         struct i40e_hw *hw;
1867
1868         PMD_INIT_FUNC_TRACE();
1869
1870         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1871                 return 0;
1872
1873         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1874
1875         if (hw->adapter_closed == 0)
1876                 i40e_dev_close(dev);
1877
1878         return 0;
1879 }
1880
1881 static int
1882 i40e_dev_configure(struct rte_eth_dev *dev)
1883 {
1884         struct i40e_adapter *ad =
1885                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1886         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1887         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1888         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1889         int i, ret;
1890
1891         ret = i40e_dev_sync_phy_type(hw);
1892         if (ret)
1893                 return ret;
1894
1895         /* Initialize to TRUE. If any Rx queue doesn't meet the bulk
1896          * allocation or vector Rx preconditions, we will reset it.
1897          */
1898         ad->rx_bulk_alloc_allowed = true;
1899         ad->rx_vec_allowed = true;
1900         ad->tx_simple_allowed = true;
1901         ad->tx_vec_allowed = true;
1902
1903         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1904                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1905
1906         /* Only legacy filter API needs the following fdir config. So when the
1907          * legacy filter API is deprecated, the following code should also be
1908          * removed.
1909          */
1910         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1911                 ret = i40e_fdir_setup(pf);
1912                 if (ret != I40E_SUCCESS) {
1913                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1914                         return -ENOTSUP;
1915                 }
1916                 ret = i40e_fdir_configure(dev);
1917                 if (ret < 0) {
1918                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1919                         goto err;
1920                 }
1921         } else
1922                 i40e_fdir_teardown(pf);
1923
1924         ret = i40e_dev_init_vlan(dev);
1925         if (ret < 0)
1926                 goto err;
1927
1928         /* VMDQ setup.
1929          *  The general PMD call sequence is NIC init, configure,
1930          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() will try to
1931          *  look up the VSI that a specific queue belongs to when VMDQ is
1932          *  applicable, so the VMDQ setting has to be done before
1933          *  rx/tx_queue_setup(); this function is a good place for vmdq_setup.
1934          *  The RSS setting needs the actual configured Rx queue number,
1935          *  which is only available after rx_queue_setup(), so dev_start()
1936          *  is a good place for the RSS setup.
1937          */
1938         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1939                 ret = i40e_vmdq_setup(dev);
1940                 if (ret)
1941                         goto err;
1942         }
1943
1944         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1945                 ret = i40e_dcb_setup(dev);
1946                 if (ret) {
1947                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1948                         goto err_dcb;
1949                 }
1950         }
1951
1952         TAILQ_INIT(&pf->flow_list);
1953
1954         return 0;
1955
1956 err_dcb:
1957         /* need to release vmdq resource if exists */
1958         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1959                 i40e_vsi_release(pf->vmdq[i].vsi);
1960                 pf->vmdq[i].vsi = NULL;
1961         }
1962         rte_free(pf->vmdq);
1963         pf->vmdq = NULL;
1964 err:
1965         /* Need to release fdir resource if exists.
1966          * Only legacy filter API needs the following fdir config. So when the
1967          * legacy filter API is deprecated, the following code should also be
1968          * removed.
1969          */
1970         i40e_fdir_teardown(pf);
1971         return ret;
1972 }
1973
1974 void
1975 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1976 {
1977         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1978         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1979         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1980         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1981         uint16_t msix_vect = vsi->msix_intr;
1982         uint16_t i;
1983
1984         for (i = 0; i < vsi->nb_qps; i++) {
1985                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1986                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1987                 rte_wmb();
1988         }
1989
1990         if (vsi->type != I40E_VSI_SRIOV) {
1991                 if (!rte_intr_allow_others(intr_handle)) {
1992                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1993                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1994                         I40E_WRITE_REG(hw,
1995                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1996                                        0);
1997                 } else {
1998                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1999                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
2000                         I40E_WRITE_REG(hw,
2001                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2002                                                        msix_vect - 1), 0);
2003                 }
2004         } else {
2005                 uint32_t reg;
2006                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2007                         vsi->user_param + (msix_vect - 1);
2008
2009                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2010                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
2011         }
2012         I40E_WRITE_FLUSH(hw);
2013 }
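/* Unbinding first clears each queue's cause-enable registers, then resets
 * the vector's linked-list head to the end-of-list index so the hardware no
 * longer walks any queue chain for that vector. */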
2014
2015 static void
2016 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
2017                        int base_queue, int nb_queue,
2018                        uint16_t itr_idx)
2019 {
2020         int i;
2021         uint32_t val;
2022         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2023         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2024
2025         /* Bind all RX queues to allocated MSIX interrupt */
2026         for (i = 0; i < nb_queue; i++) {
2027                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2028                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
2029                         ((base_queue + i + 1) <<
2030                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2031                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2032                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2033
2034                 if (i == nb_queue - 1)
2035                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
2036                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
2037         }
2038
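        /* The queues bound to one vector form a hardware linked list: each
         * QINT_RQCTL entry written above names the next queue via NEXTQ_INDX,
         * the last entry terminates the list, and the LNKLST register written
         * below holds the head of the chain. */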
2039         /* Write first RX queue to Link list register as the head element */
2040         if (vsi->type != I40E_VSI_SRIOV) {
2041                 uint16_t interval =
2042                         i40e_calc_itr_interval(1, pf->support_multi_driver);
2043
2044                 if (msix_vect == I40E_MISC_VEC_ID) {
2045                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2046                                        (base_queue <<
2047                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2048                                        (0x0 <<
2049                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2050                         I40E_WRITE_REG(hw,
2051                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2052                                        interval);
2053                 } else {
2054                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2055                                        (base_queue <<
2056                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2057                                        (0x0 <<
2058                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2059                         I40E_WRITE_REG(hw,
2060                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2061                                                        msix_vect - 1),
2062                                        interval);
2063                 }
2064         } else {
2065                 uint32_t reg;
2066
2067                 if (msix_vect == I40E_MISC_VEC_ID) {
2068                         I40E_WRITE_REG(hw,
2069                                        I40E_VPINT_LNKLST0(vsi->user_param),
2070                                        (base_queue <<
2071                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2072                                        (0x0 <<
2073                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2074                 } else {
2075                         /* num_msix_vectors_vf needs to exclude irq0 */
2076                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2077                                 vsi->user_param + (msix_vect - 1);
2078
2079                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2080                                        (base_queue <<
2081                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2082                                        (0x0 <<
2083                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2084                 }
2085         }
2086
2087         I40E_WRITE_FLUSH(hw);
2088 }
2089
2090 int
2091 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
2092 {
2093         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2094         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2095         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2096         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2097         uint16_t msix_vect = vsi->msix_intr;
2098         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2099         uint16_t queue_idx = 0;
2100         int record = 0;
2101         int i;
2102
2103         for (i = 0; i < vsi->nb_qps; i++) {
2104                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2105                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2106         }
2107
2108         /* VF bind interrupt */
2109         if (vsi->type == I40E_VSI_SRIOV) {
2110                 if (vsi->nb_msix == 0) {
2111                         PMD_DRV_LOG(ERR, "No msix resource");
2112                         return -EINVAL;
2113                 }
2114                 __vsi_queues_bind_intr(vsi, msix_vect,
2115                                        vsi->base_queue, vsi->nb_qps,
2116                                        itr_idx);
2117                 return 0;
2118         }
2119
2120         /* PF & VMDq bind interrupt */
2121         if (rte_intr_dp_is_en(intr_handle)) {
2122                 if (vsi->type == I40E_VSI_MAIN) {
2123                         queue_idx = 0;
2124                         record = 1;
2125                 } else if (vsi->type == I40E_VSI_VMDQ2) {
2126                         struct i40e_vsi *main_vsi =
2127                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2128                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
2129                         record = 1;
2130                 }
2131         }
2132
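        /* 'record' marks VSIs whose queue-to-vector mapping must also be
         * mirrored into intr_handle->intr_vec for the EAL Rx interrupt path;
         * queue_idx is the VSI's first queue within the device-level list. */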
2133         for (i = 0; i < vsi->nb_used_qps; i++) {
2134                 if (vsi->nb_msix == 0) {
2135                         PMD_DRV_LOG(ERR, "No msix resource");
2136                         return -EINVAL;
2137                 } else if (nb_msix <= 1) {
2138                         if (!rte_intr_allow_others(intr_handle))
2139                                 /* allow sharing MISC_VEC_ID */
2140                                 msix_vect = I40E_MISC_VEC_ID;
2141
2142                         /* not enough msix_vect; map all queues to one */
2143                         __vsi_queues_bind_intr(vsi, msix_vect,
2144                                                vsi->base_queue + i,
2145                                                vsi->nb_used_qps - i,
2146                                                itr_idx);
2147                         for (; !!record && i < vsi->nb_used_qps; i++)
2148                                 intr_handle->intr_vec[queue_idx + i] =
2149                                         msix_vect;
2150                         break;
2151                 }
2152                 /* 1:1 queue/msix_vect mapping */
2153                 __vsi_queues_bind_intr(vsi, msix_vect,
2154                                        vsi->base_queue + i, 1,
2155                                        itr_idx);
2156                 if (!!record)
2157                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
2158
2159                 msix_vect++;
2160                 nb_msix--;
2161         }
2162
2163         return 0;
2164 }
2165
2166 void
2167 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2168 {
2169         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2170         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2171         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2172         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2173         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2174         uint16_t msix_intr, i;
2175
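        /* Vector 0 is the misc/adminq vector, so queue vector N is programmed
         * through PFINT_DYN_CTLN(N - 1); when other vectors are not allowed,
         * or in multi-driver mode, everything shares PFINT_DYN_CTL0. */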
2176         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2177                 for (i = 0; i < vsi->nb_msix; i++) {
2178                         msix_intr = vsi->msix_intr + i;
2179                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2180                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
2181                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2182                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2183                 }
2184         else
2185                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2186                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
2187                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2188                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2189
2190         I40E_WRITE_FLUSH(hw);
2191 }
2192
2193 void
2194 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2195 {
2196         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2197         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2198         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2199         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2200         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2201         uint16_t msix_intr, i;
2202
2203         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2204                 for (i = 0; i < vsi->nb_msix; i++) {
2205                         msix_intr = vsi->msix_intr + i;
2206                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2207                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2208                 }
2209         else
2210                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2211                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2212
2213         I40E_WRITE_FLUSH(hw);
2214 }
2215
2216 static inline uint8_t
2217 i40e_parse_link_speeds(uint16_t link_speeds)
2218 {
2219         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2220
2221         if (link_speeds & ETH_LINK_SPEED_40G)
2222                 link_speed |= I40E_LINK_SPEED_40GB;
2223         if (link_speeds & ETH_LINK_SPEED_25G)
2224                 link_speed |= I40E_LINK_SPEED_25GB;
2225         if (link_speeds & ETH_LINK_SPEED_20G)
2226                 link_speed |= I40E_LINK_SPEED_20GB;
2227         if (link_speeds & ETH_LINK_SPEED_10G)
2228                 link_speed |= I40E_LINK_SPEED_10GB;
2229         if (link_speeds & ETH_LINK_SPEED_1G)
2230                 link_speed |= I40E_LINK_SPEED_1GB;
2231         if (link_speeds & ETH_LINK_SPEED_100M)
2232                 link_speed |= I40E_LINK_SPEED_100MB;
2233
2234         return link_speed;
2235 }
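/* For example, link_speeds == (ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G)
 * translates to I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB, the speed bitmap
 * format that the admin queue set_phy_config command expects. */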
2236
2237 static int
2238 i40e_phy_conf_link(struct i40e_hw *hw,
2239                    uint8_t abilities,
2240                    uint8_t force_speed,
2241                    bool is_up)
2242 {
2243         enum i40e_status_code status;
2244         struct i40e_aq_get_phy_abilities_resp phy_ab;
2245         struct i40e_aq_set_phy_config phy_conf;
2246         enum i40e_aq_phy_type cnt;
2247         uint8_t avail_speed;
2248         uint32_t phy_type_mask = 0;
2249
2250         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2251                         I40E_AQ_PHY_FLAG_PAUSE_RX |
2253                         I40E_AQ_PHY_FLAG_LOW_POWER;
2254         int ret = -ENOTSUP;
2255
2256         /* To get phy capabilities of available speeds. */
2257         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2258                                               NULL);
2259         if (status) {
2260                 PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2261                                 status);
2262                 return ret;
2263         }
2264         avail_speed = phy_ab.link_speed;
2265
2266         /* To get the current phy config. */
2267         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2268                                               NULL);
2269         if (status) {
2270                 PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2271                                 status);
2272                 return ret;
2273         }
2274
2275         /* If the link needs to go up, it is in autoneg mode, and the speed
2276          * is already OK, there is no need to set it up again.
2277          */
2278         if (is_up && phy_ab.phy_type != 0 &&
2279                      abilities & I40E_AQ_PHY_AN_ENABLED &&
2280                      phy_ab.link_speed != 0)
2281                 return I40E_SUCCESS;
2282
2283         memset(&phy_conf, 0, sizeof(phy_conf));
2284
2285         /* bits 0-2 use the values from get_phy_abilities_resp */
2286         abilities &= ~mask;
2287         abilities |= phy_ab.abilities & mask;
2288
2289         phy_conf.abilities = abilities;
2290
2291         /* If the link needs to go up but the forced speed is not supported,
2292          * warn the user and configure the default available speeds.
2293          */
2294         if (is_up && !(force_speed & avail_speed)) {
2295                 PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2296                 phy_conf.link_speed = avail_speed;
2297         } else {
2298                 phy_conf.link_speed = is_up ? force_speed : avail_speed;
2299         }
2300
2301         /* PHY type mask needs to include each type except PHY type extension */
2302         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2303                 phy_type_mask |= 1 << cnt;
2304
2305         /* use get_phy_abilities_resp value for the rest */
2306         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2307         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2308                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2309                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
2310         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2311         phy_conf.eee_capability = phy_ab.eee_capability;
2312         phy_conf.eeer = phy_ab.eeer_val;
2313         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2314
2315         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2316                     phy_ab.abilities, phy_ab.link_speed);
2317         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2318                     phy_conf.abilities, phy_conf.link_speed);
2319
2320         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2321         if (status)
2322                 return ret;
2323
2324         return I40E_SUCCESS;
2325 }
2326
2327 static int
2328 i40e_apply_link_speed(struct rte_eth_dev *dev)
2329 {
2330         uint8_t speed;
2331         uint8_t abilities = 0;
2332         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2333         struct rte_eth_conf *conf = &dev->data->dev_conf;
2334
2335         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2336                      I40E_AQ_PHY_LINK_ENABLED;
2337
2338         if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
2339                 conf->link_speeds = ETH_LINK_SPEED_40G |
2340                                     ETH_LINK_SPEED_25G |
2341                                     ETH_LINK_SPEED_20G |
2342                                     ETH_LINK_SPEED_10G |
2343                                     ETH_LINK_SPEED_1G |
2344                                     ETH_LINK_SPEED_100M;
2345
2346                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2347         } else {
2348                 abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2349         }
2350         speed = i40e_parse_link_speeds(conf->link_speeds);
2351
2352         return i40e_phy_conf_link(hw, abilities, speed, true);
2353 }
2354
2355 static int
2356 i40e_dev_start(struct rte_eth_dev *dev)
2357 {
2358         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2359         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2360         struct i40e_vsi *main_vsi = pf->main_vsi;
2361         int ret, i;
2362         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2363         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2364         uint32_t intr_vector = 0;
2365         struct i40e_vsi *vsi;
2366         uint16_t nb_rxq, nb_txq;
2367
2368         hw->adapter_stopped = 0;
2369
2370         rte_intr_disable(intr_handle);
2371
2372         if ((rte_intr_cap_multiple(intr_handle) ||
2373              !RTE_ETH_DEV_SRIOV(dev).active) &&
2374             dev->data->dev_conf.intr_conf.rxq != 0) {
2375                 intr_vector = dev->data->nb_rx_queues;
2376                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2377                 if (ret)
2378                         return ret;
2379         }
2380
2381         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2382                 intr_handle->intr_vec =
2383                         rte_zmalloc("intr_vec",
2384                                     dev->data->nb_rx_queues * sizeof(int),
2385                                     0);
2386                 if (!intr_handle->intr_vec) {
2387                         PMD_INIT_LOG(ERR,
2388                                 "Failed to allocate %d rx_queues intr_vec",
2389                                 dev->data->nb_rx_queues);
2390                         return -ENOMEM;
2391                 }
2392         }
2393
2394         /* Initialize VSI */
2395         ret = i40e_dev_rxtx_init(pf);
2396         if (ret != I40E_SUCCESS) {
2397                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2398                 return ret;
2399         }
2400
2401         /* Map queues with MSIX interrupt */
2402         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2403                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2404         ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2405         if (ret < 0)
2406                 return ret;
2407         i40e_vsi_enable_queues_intr(main_vsi);
2408
2409         /* Map VMDQ VSI queues with MSIX interrupt */
2410         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2411                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2412                 ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2413                                                 I40E_ITR_INDEX_DEFAULT);
2414                 if (ret < 0)
2415                         return ret;
2416                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2417         }
2418
2419         /* Enable all queues which have been configured */
2420         for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
2421                 ret = i40e_dev_rx_queue_start(dev, nb_rxq);
2422                 if (ret)
2423                         goto rx_err;
2424         }
2425
2426         for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
2427                 ret = i40e_dev_tx_queue_start(dev, nb_txq);
2428                 if (ret)
2429                         goto tx_err;
2430         }
2431
2432         /* Enable receiving broadcast packets */
2433         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2434         if (ret != I40E_SUCCESS)
2435                 PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
2436
2437         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2438                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2439                                                 true, NULL);
2440                 if (ret != I40E_SUCCESS)
2441                         PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
2442         }
2443
2444         /* Enable the VLAN promiscuous mode. */
2445         if (pf->vfs) {
2446                 for (i = 0; i < pf->vf_num; i++) {
2447                         vsi = pf->vfs[i].vsi;
2448                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2449                                                      true, NULL);
2450                 }
2451         }
2452
2453         /* Enable mac loopback mode */
2454         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2455             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2456                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2457                 if (ret != I40E_SUCCESS) {
2458                         PMD_DRV_LOG(ERR, "failed to set loopback link");
2459                         goto tx_err;
2460                 }
2461         }
2462
2463         /* Apply link configure */
2464         ret = i40e_apply_link_speed(dev);
2465         if (I40E_SUCCESS != ret) {
2466                 PMD_DRV_LOG(ERR, "Failed to apply link setting");
2467                 goto tx_err;
2468         }
2469
2470         if (!rte_intr_allow_others(intr_handle)) {
2471                 rte_intr_callback_unregister(intr_handle,
2472                                              i40e_dev_interrupt_handler,
2473                                              (void *)dev);
2474                 /* configure and enable device interrupt */
2475                 i40e_pf_config_irq0(hw, FALSE);
2476                 i40e_pf_enable_irq0(hw);
2477
2478                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2479                         PMD_INIT_LOG(INFO,
2480                                 "lsc won't be enabled: no interrupt multiplexing");
2481         } else {
2482                 ret = i40e_aq_set_phy_int_mask(hw,
2483                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2484                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2485                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2486                 if (ret != I40E_SUCCESS)
2487                         PMD_DRV_LOG(WARNING, "Failed to set phy mask");
2488
2489                 /* Call the get_link_info aq command to enable/disable LSE */
2490                 i40e_dev_link_update(dev, 0);
2491         }
2492
2493         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2494                 rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2495                                   i40e_dev_alarm_handler, dev);
2496         } else {
2497                 /* enable uio intr after callback register */
2498                 rte_intr_enable(intr_handle);
2499         }
2500
2501         i40e_filter_restore(pf);
2502
2503         if (pf->tm_conf.root && !pf->tm_conf.committed)
2504                 PMD_DRV_LOG(WARNING,
2505                             "please call hierarchy_commit() "
2506                             "before starting the port");
2507
2508         return I40E_SUCCESS;
2509
2510 tx_err:
2511         for (i = 0; i < nb_txq; i++)
2512                 i40e_dev_tx_queue_stop(dev, i);
2513 rx_err:
2514         for (i = 0; i < nb_rxq; i++)
2515                 i40e_dev_rx_queue_stop(dev, i);
2516
2517         return ret;
2518 }
2519
2520 static int
2521 i40e_dev_stop(struct rte_eth_dev *dev)
2522 {
2523         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2524         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2525         struct i40e_vsi *main_vsi = pf->main_vsi;
2526         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2527         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2528         int i;
2529
2530         if (hw->adapter_stopped == 1)
2531                 return 0;
2532
2533         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2534                 rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2535                 rte_intr_enable(intr_handle);
2536         }
2537
2538         /* Disable all queues */
2539         for (i = 0; i < dev->data->nb_tx_queues; i++)
2540                 i40e_dev_tx_queue_stop(dev, i);
2541
2542         for (i = 0; i < dev->data->nb_rx_queues; i++)
2543                 i40e_dev_rx_queue_stop(dev, i);
2544
2545         /* un-map queues with interrupt registers */
2546         i40e_vsi_disable_queues_intr(main_vsi);
2547         i40e_vsi_queues_unbind_intr(main_vsi);
2548
2549         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2550                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2551                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2552         }
2553
2554         /* Clear all queues and release memory */
2555         i40e_dev_clear_queues(dev);
2556
2557         /* Set link down */
2558         i40e_dev_set_link_down(dev);
2559
2560         if (!rte_intr_allow_others(intr_handle))
2561                 /* revert to the default handler */
2562                 rte_intr_callback_register(intr_handle,
2563                                            i40e_dev_interrupt_handler,
2564                                            (void *)dev);
2565
2566         /* Clean datapath event and queue/vec mapping */
2567         rte_intr_efd_disable(intr_handle);
2568         if (intr_handle->intr_vec) {
2569                 rte_free(intr_handle->intr_vec);
2570                 intr_handle->intr_vec = NULL;
2571         }
2572
2573         /* reset hierarchy commit */
2574         pf->tm_conf.committed = false;
2575
2576         hw->adapter_stopped = 1;
2577         dev->data->dev_started = 0;
2578
2579         pf->adapter->rss_reta_updated = 0;
2580
2581         return 0;
2582 }
2583
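/*
 * Close path: stops the port, removes mirror rules and filters, releases
 * the VSIs/VEBs, shuts down the admin queue and HMC, and finally forces a
 * PF software reset so no hardware state leaks into the next driver
 * instance. Only the primary process performs the teardown.
 */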
2584 static int
2585 i40e_dev_close(struct rte_eth_dev *dev)
2586 {
2587         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2588         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2589         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2590         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2591         struct i40e_mirror_rule *p_mirror;
2592         struct i40e_filter_control_settings settings;
2593         struct rte_flow *p_flow;
2594         uint32_t reg;
2595         int i;
2596         int ret;
2597         uint8_t aq_fail = 0;
2598         int retries = 0;
2599
2600         PMD_INIT_FUNC_TRACE();
2601         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2602                 return 0;
2603
2604         ret = rte_eth_switch_domain_free(pf->switch_domain_id);
2605         if (ret)
2606                 PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
2607
2609         ret = i40e_dev_stop(dev);
2610
2611         /* Remove all mirror rules */
2612         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2613                 ret = i40e_aq_del_mirror_rule(hw,
2614                                               pf->main_vsi->veb->seid,
2615                                               p_mirror->rule_type,
2616                                               p_mirror->entries,
2617                                               p_mirror->num_entries,
2618                                               p_mirror->id);
2619                 if (ret < 0)
2620                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2621                                     "status = %d, aq_err = %d.", ret,
2622                                     hw->aq.asq_last_status);
2623
2624                 /* remove the mirror rule's software resources regardless */
2625                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2626                 rte_free(p_mirror);
2627                 pf->nb_mirror_rule--;
2628         }
2629
2630         i40e_dev_free_queues(dev);
2631
2632         /* Disable interrupt */
2633         i40e_pf_disable_irq0(hw);
2634         rte_intr_disable(intr_handle);
2635
2636         /*
2637          * Only legacy filter API needs the following fdir config. So when the
2638          * legacy filter API is deprecated, the following code should also be
2639          * removed.
2640          */
2641         i40e_fdir_teardown(pf);
2642
2643         /* shutdown and destroy the HMC */
2644         i40e_shutdown_lan_hmc(hw);
2645
2646         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2647                 i40e_vsi_release(pf->vmdq[i].vsi);
2648                 pf->vmdq[i].vsi = NULL;
2649         }
2650         rte_free(pf->vmdq);
2651         pf->vmdq = NULL;
2652
2653         /* release all the existing VSIs and VEBs */
2654         i40e_vsi_release(pf->main_vsi);
2655
2656         /* shutdown the adminq */
2657         i40e_aq_queue_shutdown(hw, true);
2658         i40e_shutdown_adminq(hw);
2659
2660         i40e_res_pool_destroy(&pf->qp_pool);
2661         i40e_res_pool_destroy(&pf->msix_pool);
2662
2663         /* Disable flexible payload in global configuration */
2664         if (!pf->support_multi_driver)
2665                 i40e_flex_payload_reg_set_default(hw);
2666
2667         /* force a PF reset to clean anything leftover */
2668         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2669         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2670                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2671         I40E_WRITE_FLUSH(hw);
2672
2673         /* Clear PXE mode */
2674         i40e_clear_pxe_mode(hw);
2675
2676         /* Unconfigure filter control */
2677         memset(&settings, 0, sizeof(settings));
2678         ret = i40e_set_filter_control(hw, &settings);
2679         if (ret)
2680                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2681                                         ret);
2682
2683         /* Disable flow control */
2684         hw->fc.requested_mode = I40E_FC_NONE;
2685         i40e_set_fc(hw, &aq_fail, TRUE);
2686
2687         /* uninitialize pf host driver */
2688         i40e_pf_host_uninit(dev);
2689
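        /*
         * The interrupt callback may still be executing on another core;
         * rte_intr_callback_unregister() returns -EAGAIN in that case, so
         * retry up to 5 times with a 500 ms delay between attempts.
         */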
2690         do {
2691                 ret = rte_intr_callback_unregister(intr_handle,
2692                                 i40e_dev_interrupt_handler, dev);
2693                 if (ret >= 0 || ret == -ENOENT) {
2694                         break;
2695                 } else if (ret != -EAGAIN) {
2696                         PMD_INIT_LOG(ERR,
2697                                  "intr callback unregister failed: %d",
2698                                  ret);
2699                 }
2700                 i40e_msec_delay(500);
2701         } while (retries++ < 5);
2702
2703         i40e_rm_ethtype_filter_list(pf);
2704         i40e_rm_tunnel_filter_list(pf);
2705         i40e_rm_fdir_filter_list(pf);
2706
2707         /* Remove all flows */
2708         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
2709                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
2710                 /* Do not free FDIR flows since they are static allocated */
2711                 if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
2712                         rte_free(p_flow);
2713         }
2714
2715         /* release the fdir static allocated memory */
2716         i40e_fdir_memory_cleanup(pf);
2717
2718         /* Remove all Traffic Manager configuration */
2719         i40e_tm_conf_uninit(dev);
2720
2721         i40e_clear_automask(pf);
2722
2723         hw->adapter_closed = 1;
2724         return ret;
2725 }
2726
2727 /*
2728  * Reset PF device only to re-initialize resources in PMD layer
2729  */
2730 static int
2731 i40e_dev_reset(struct rte_eth_dev *dev)
2732 {
2733         int ret;
2734
2735         /* When a DPDK PMD PF begins to reset the PF port, it should notify
2736          * all its VFs so that they stay aligned with it. The notification
2737          * mechanism is PMD specific and, for the i40e PF, rather complex.
2738          * To avoid unexpected behavior in the VFs, resetting a PF with
2739          * SR-IOV active is currently not supported. It might be added later.
2740          */
2741         if (dev->data->sriov.active)
2742                 return -ENOTSUP;
2743
2744         ret = eth_i40e_dev_uninit(dev);
2745         if (ret)
2746                 return ret;
2747
2748         ret = eth_i40e_dev_init(dev, NULL);
2749
2750         return ret;
2751 }
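
/*
 * A minimal application-side sketch (illustrative, not part of this
 * driver): the reset callback above is reached through rte_eth_dev_reset(),
 * typically from an RTE_ETH_EVENT_INTR_RESET handler. port_id and the
 * reconfiguration steps are assumptions of the example.
 *
 *	if (rte_eth_dev_reset(port_id) == 0) {
 *		rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *		... re-setup the Rx/Tx queues ...
 *		rte_eth_dev_start(port_id);
 *	}
 */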
2752
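/*
 * Enable unicast promiscuous first, then multicast promiscuous, on the
 * main VSI. If the second AQ command fails, the unicast setting is rolled
 * back so the two toggles never get out of sync.
 */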
2753 static int
2754 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2755 {
2756         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2757         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2758         struct i40e_vsi *vsi = pf->main_vsi;
2759         int status;
2760
2761         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2762                                                      true, NULL, true);
2763         if (status != I40E_SUCCESS) {
2764                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2765                 return -EAGAIN;
2766         }
2767
2768         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2769                                                         TRUE, NULL);
2770         if (status != I40E_SUCCESS) {
2771                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2772                 /* Rollback unicast promiscuous mode */
2773                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2774                                                     false, NULL, true);
2775                 return -EAGAIN;
2776         }
2777
2778         return 0;
2779 }
2780
2781 static int
2782 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2783 {
2784         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2785         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2786         struct i40e_vsi *vsi = pf->main_vsi;
2787         int status;
2788
2789         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2790                                                      false, NULL, true);
2791         if (status != I40E_SUCCESS) {
2792                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2793                 return -EAGAIN;
2794         }
2795
2796         /* multicast promiscuous must stay on while all_multicast is enabled */
2797         if (dev->data->all_multicast == 1)
2798                 return 0;
2799
2800         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2801                                                         false, NULL);
2802         if (status != I40E_SUCCESS) {
2803                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2804                 /* Rollback unicast promiscuous mode */
2805                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2806                                                     true, NULL, true);
2807                 return -EAGAIN;
2808         }
2809
2810         return 0;
2811 }
2812
2813 static int
2814 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2815 {
2816         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2817         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2818         struct i40e_vsi *vsi = pf->main_vsi;
2819         int ret;
2820
2821         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2822         if (ret != I40E_SUCCESS) {
2823                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2824                 return -EAGAIN;
2825         }
2826
2827         return 0;
2828 }
2829
2830 static int
2831 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2832 {
2833         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2834         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2835         struct i40e_vsi *vsi = pf->main_vsi;
2836         int ret;
2837
2838         if (dev->data->promiscuous == 1)
2839                 return 0; /* promiscuous mode still needs multicast promiscuous */
2840
2841         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2842                                 vsi->seid, FALSE, NULL);
2843         if (ret != I40E_SUCCESS) {
2844                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2845                 return -EAGAIN;
2846         }
2847
2848         return 0;
2849 }
2850
2851 /*
2852  * Set device link up.
2853  */
2854 static int
2855 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2856 {
2857         /* re-apply link speed setting */
2858         return i40e_apply_link_speed(dev);
2859 }
2860
2861 /*
2862  * Set device link down.
2863  */
2864 static int
2865 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2866 {
2867         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2868         uint8_t abilities = 0;
2869         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2870
2871         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2872         return i40e_phy_conf_link(hw, abilities, speed, false);
2873 }
2874
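/*
 * Fast link-status path: read the MAC link-status register directly
 * instead of issuing an admin queue command. Used by i40e_dev_link_update()
 * when neither wait_to_complete nor link-state-change interrupts (LSE)
 * are requested.
 */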
2875 static __rte_always_inline void
2876 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2877 {
2878 /* Link status registers and values */
2879 #define I40E_PRTMAC_LINKSTA             0x001E2420
2880 #define I40E_REG_LINK_UP                0x40000080
2881 #define I40E_PRTMAC_MACC                0x001E24E0
2882 #define I40E_REG_MACC_25GB              0x00020000
2883 #define I40E_REG_SPEED_MASK             0x38000000
2884 #define I40E_REG_SPEED_0                0x00000000
2885 #define I40E_REG_SPEED_1                0x08000000
2886 #define I40E_REG_SPEED_2                0x10000000
2887 #define I40E_REG_SPEED_3                0x18000000
2888 #define I40E_REG_SPEED_4                0x20000000
2889         uint32_t link_speed;
2890         uint32_t reg_val;
2891
2892         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2893         link_speed = reg_val & I40E_REG_SPEED_MASK;
2894         reg_val &= I40E_REG_LINK_UP;
2895         link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2896
2897         if (unlikely(link->link_status == 0))
2898                 return;
2899
2900         /* Parse the link status */
2901         switch (link_speed) {
2902         case I40E_REG_SPEED_0:
2903                 link->link_speed = ETH_SPEED_NUM_100M;
2904                 break;
2905         case I40E_REG_SPEED_1:
2906                 link->link_speed = ETH_SPEED_NUM_1G;
2907                 break;
2908         case I40E_REG_SPEED_2:
2909                 if (hw->mac.type == I40E_MAC_X722)
2910                         link->link_speed = ETH_SPEED_NUM_2_5G;
2911                 else
2912                         link->link_speed = ETH_SPEED_NUM_10G;
2913                 break;
2914         case I40E_REG_SPEED_3:
2915                 if (hw->mac.type == I40E_MAC_X722) {
2916                         link->link_speed = ETH_SPEED_NUM_5G;
2917                 } else {
2918                         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2919
2920                         if (reg_val & I40E_REG_MACC_25GB)
2921                                 link->link_speed = ETH_SPEED_NUM_25G;
2922                         else
2923                                 link->link_speed = ETH_SPEED_NUM_40G;
2924                 }
2925                 break;
2926         case I40E_REG_SPEED_4:
2927                 if (hw->mac.type == I40E_MAC_X722)
2928                         link->link_speed = ETH_SPEED_NUM_10G;
2929                 else
2930                         link->link_speed = ETH_SPEED_NUM_20G;
2931                 break;
2932         default:
2933                 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2934                 break;
2935         }
2936 }
2937
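/*
 * Admin-queue link-status path: query the firmware for link information
 * and, when wait_to_complete is set, poll for up to MAX_REPEAT_TIME *
 * CHECK_INTERVAL = 1 s for the link to come up. Passing enable_lse also
 * (re)arms link-state-change event reporting in the firmware.
 */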
2938 static __rte_always_inline void
2939 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2940         bool enable_lse, int wait_to_complete)
2941 {
2942 #define CHECK_INTERVAL             100  /* 100ms */
2943 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2944         uint32_t rep_cnt = MAX_REPEAT_TIME;
2945         struct i40e_link_status link_status;
2946         int status;
2947
2948         memset(&link_status, 0, sizeof(link_status));
2949
2950         do {
2951                 memset(&link_status, 0, sizeof(link_status));
2952
2953                 /* Get link status information from hardware */
2954                 status = i40e_aq_get_link_info(hw, enable_lse,
2955                                                 &link_status, NULL);
2956                 if (unlikely(status != I40E_SUCCESS)) {
2957                         link->link_speed = ETH_SPEED_NUM_NONE;
2958                         link->link_duplex = ETH_LINK_FULL_DUPLEX;
2959                         PMD_DRV_LOG(ERR, "Failed to get link info");
2960                         return;
2961                 }
2962
2963                 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2964                 if (!wait_to_complete || link->link_status)
2965                         break;
2966
2967                 rte_delay_ms(CHECK_INTERVAL);
2968         } while (--rep_cnt);
2969
2970         /* Parse the link status */
2971         switch (link_status.link_speed) {
2972         case I40E_LINK_SPEED_100MB:
2973                 link->link_speed = ETH_SPEED_NUM_100M;
2974                 break;
2975         case I40E_LINK_SPEED_1GB:
2976                 link->link_speed = ETH_SPEED_NUM_1G;
2977                 break;
2978         case I40E_LINK_SPEED_10GB:
2979                 link->link_speed = ETH_SPEED_NUM_10G;
2980                 break;
2981         case I40E_LINK_SPEED_20GB:
2982                 link->link_speed = ETH_SPEED_NUM_20G;
2983                 break;
2984         case I40E_LINK_SPEED_25GB:
2985                 link->link_speed = ETH_SPEED_NUM_25G;
2986                 break;
2987         case I40E_LINK_SPEED_40GB:
2988                 link->link_speed = ETH_SPEED_NUM_40G;
2989                 break;
2990         default:
2991                 if (link->link_status)
2992                         link->link_speed = ETH_SPEED_NUM_UNKNOWN;
2993                 else
2994                         link->link_speed = ETH_SPEED_NUM_NONE;
2995                 break;
2996         }
2997 }
2998
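/*
 * Link-update callback, reached via the ethdev API. A minimal sketch of
 * the application side, assuming a valid port_id (rte_eth_link_get() maps
 * to wait_to_complete = 1, rte_eth_link_get_nowait() to 0):
 *
 *	struct rte_eth_link link;
 *	rte_eth_link_get(port_id, &link);
 */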
2999 int
3000 i40e_dev_link_update(struct rte_eth_dev *dev,
3001                      int wait_to_complete)
3002 {
3003         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3004         struct rte_eth_link link;
3005         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3006         int ret;
3007
3008         memset(&link, 0, sizeof(link));
3009
3010         /* i40e uses full duplex only */
3011         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3012         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3013                         ETH_LINK_SPEED_FIXED);
3014
3015         if (!wait_to_complete && !enable_lse)
3016                 update_link_reg(hw, &link);
3017         else
3018                 update_link_aq(hw, &link, enable_lse, wait_to_complete);
3019
3020         if (hw->switch_dev)
3021                 rte_eth_linkstatus_get(hw->switch_dev, &link);
3022
3023         ret = rte_eth_linkstatus_set(dev, &link);
3024         i40e_notify_all_vfs_link_status(dev);
3025
3026         return ret;
3027 }
3028
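/*
 * Extend a 48-bit hardware counter into a 64-bit software counter, keeping
 * the previous 64-bit value in *prev_stat. Worked example (illustrative
 * values, assuming I40E_RXTX_BYTES_H_16_BIT() returns the high 16 bits in
 * place): if the previous value is 0x0001_FFFF_FFFF_FFF0 and the new
 * 48-bit reading is 0x10, the low 48 bits must have wrapped, so 1 << 48 is
 * added (giving 0x0001_0000_0000_0010) and the carried high 16 bits bring
 * the total to 0x0002_0000_0000_0010.
 */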
3029 static void
3030 i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
3031                           uint32_t loreg, bool offset_loaded, uint64_t *offset,
3032                           uint64_t *stat, uint64_t *prev_stat)
3033 {
3034         i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
3035         /* extend to 64 bits when the 48-bit statistics counter overflows */
3036         if (offset_loaded) {
3037                 if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
3038                         *stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
3039                 *stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
3040         }
3041         *prev_stat = *stat;
3042 }
3043
3044 /* Get all the statistics of a VSI */
3045 void
3046 i40e_update_vsi_stats(struct i40e_vsi *vsi)
3047 {
3048         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
3049         struct i40e_eth_stats *nes = &vsi->eth_stats;
3050         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3051         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
3052
3053         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
3054                                   vsi->offset_loaded, &oes->rx_bytes,
3055                                   &nes->rx_bytes, &vsi->prev_rx_bytes);
3056         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
3057                             vsi->offset_loaded, &oes->rx_unicast,
3058                             &nes->rx_unicast);
3059         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
3060                             vsi->offset_loaded, &oes->rx_multicast,
3061                             &nes->rx_multicast);
3062         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
3063                             vsi->offset_loaded, &oes->rx_broadcast,
3064                             &nes->rx_broadcast);
3065         /* exclude CRC bytes */
3066         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
3067                 nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
3068
3069         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
3070                             &oes->rx_discards, &nes->rx_discards);
3071         /* GLV_REPC not supported */
3072         /* GLV_RMPC not supported */
3073         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
3074                             &oes->rx_unknown_protocol,
3075                             &nes->rx_unknown_protocol);
3076         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
3077                                   vsi->offset_loaded, &oes->tx_bytes,
3078                                   &nes->tx_bytes, &vsi->prev_tx_bytes);
3079         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
3080                             vsi->offset_loaded, &oes->tx_unicast,
3081                             &nes->tx_unicast);
3082         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
3083                             vsi->offset_loaded, &oes->tx_multicast,
3084                             &nes->tx_multicast);
3085         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
3086                             vsi->offset_loaded,  &oes->tx_broadcast,
3087                             &nes->tx_broadcast);
3088         /* GLV_TDPC not supported */
3089         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
3090                             &oes->tx_errors, &nes->tx_errors);
3091         vsi->offset_loaded = true;
3092
3093         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
3094                     vsi->vsi_id);
3095         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
3096         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
3097         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
3098         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
3099         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
3100         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3101                     nes->rx_unknown_protocol);
3102         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
3103         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
3104         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
3105         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
3106         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
3107         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
3108         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
3109                     vsi->vsi_id);
3110 }
3111
3112 static void
3113 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
3114 {
3115         unsigned int i;
3116         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3117         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
3118
3119         /* Get rx/tx bytes of internal transfer packets */
3120         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
3121                                   I40E_GLV_GORCL(hw->port),
3122                                   pf->offset_loaded,
3123                                   &pf->internal_stats_offset.rx_bytes,
3124                                   &pf->internal_stats.rx_bytes,
3125                                   &pf->internal_prev_rx_bytes);
3126         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
3127                                   I40E_GLV_GOTCL(hw->port),
3128                                   pf->offset_loaded,
3129                                   &pf->internal_stats_offset.tx_bytes,
3130                                   &pf->internal_stats.tx_bytes,
3131                                   &pf->internal_prev_tx_bytes);
3132         /* Get total internal rx packet count */
3133         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
3134                             I40E_GLV_UPRCL(hw->port),
3135                             pf->offset_loaded,
3136                             &pf->internal_stats_offset.rx_unicast,
3137                             &pf->internal_stats.rx_unicast);
3138         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
3139                             I40E_GLV_MPRCL(hw->port),
3140                             pf->offset_loaded,
3141                             &pf->internal_stats_offset.rx_multicast,
3142                             &pf->internal_stats.rx_multicast);
3143         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
3144                             I40E_GLV_BPRCL(hw->port),
3145                             pf->offset_loaded,
3146                             &pf->internal_stats_offset.rx_broadcast,
3147                             &pf->internal_stats.rx_broadcast);
3148         /* Get total internal tx packet count */
3149         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
3150                             I40E_GLV_UPTCL(hw->port),
3151                             pf->offset_loaded,
3152                             &pf->internal_stats_offset.tx_unicast,
3153                             &pf->internal_stats.tx_unicast);
3154         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
3155                             I40E_GLV_MPTCL(hw->port),
3156                             pf->offset_loaded,
3157                             &pf->internal_stats_offset.tx_multicast,
3158                             &pf->internal_stats.tx_multicast);
3159         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
3160                             I40E_GLV_BPTCL(hw->port),
3161                             pf->offset_loaded,
3162                             &pf->internal_stats_offset.tx_broadcast,
3163                             &pf->internal_stats.tx_broadcast);
3164
3165         /* exclude CRC size */
3166         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
3167                 pf->internal_stats.rx_multicast +
3168                 pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
3169
3170         /* Get statistics of struct i40e_eth_stats */
3171         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
3172                                   I40E_GLPRT_GORCL(hw->port),
3173                                   pf->offset_loaded, &os->eth.rx_bytes,
3174                                   &ns->eth.rx_bytes, &pf->prev_rx_bytes);
3175         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
3176                             I40E_GLPRT_UPRCL(hw->port),
3177                             pf->offset_loaded, &os->eth.rx_unicast,
3178                             &ns->eth.rx_unicast);
3179         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
3180                             I40E_GLPRT_MPRCL(hw->port),
3181                             pf->offset_loaded, &os->eth.rx_multicast,
3182                             &ns->eth.rx_multicast);
3183         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
3184                             I40E_GLPRT_BPRCL(hw->port),
3185                             pf->offset_loaded, &os->eth.rx_broadcast,
3186                             &ns->eth.rx_broadcast);
3187         /* Workaround: CRC size should not be included in byte statistics,
3188          * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
3189          * packet.
3190          */
3191         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3192                 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3193
3194         /* exclude internal rx bytes
3195          * Workaround: I40E_GLV_GORC[H/L] may be updated before
3196          * I40E_GLPRT_GORC[H/L], leaving a small window that can cause a
3197          * negative value. The same applies to I40E_GLV_UPRC[H/L],
3198          * I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
3199          */
3200         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
3201                 ns->eth.rx_bytes = 0;
3202         else
3203                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
3204
3205         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
3206                 ns->eth.rx_unicast = 0;
3207         else
3208                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
3209
3210         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
3211                 ns->eth.rx_multicast = 0;
3212         else
3213                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
3214
3215         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
3216                 ns->eth.rx_broadcast = 0;
3217         else
3218                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
3219
3220         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
3221                             pf->offset_loaded, &os->eth.rx_discards,
3222                             &ns->eth.rx_discards);
3223         /* GLPRT_REPC not supported */
3224         /* GLPRT_RMPC not supported */
3225         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
3226                             pf->offset_loaded,
3227                             &os->eth.rx_unknown_protocol,
3228                             &ns->eth.rx_unknown_protocol);
3229         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
3230                                   I40E_GLPRT_GOTCL(hw->port),
3231                                   pf->offset_loaded, &os->eth.tx_bytes,
3232                                   &ns->eth.tx_bytes, &pf->prev_tx_bytes);
3233         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
3234                             I40E_GLPRT_UPTCL(hw->port),
3235                             pf->offset_loaded, &os->eth.tx_unicast,
3236                             &ns->eth.tx_unicast);
3237         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
3238                             I40E_GLPRT_MPTCL(hw->port),
3239                             pf->offset_loaded, &os->eth.tx_multicast,
3240                             &ns->eth.tx_multicast);
3241         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3242                             I40E_GLPRT_BPTCL(hw->port),
3243                             pf->offset_loaded, &os->eth.tx_broadcast,
3244                             &ns->eth.tx_broadcast);
3245         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3246                 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
3247
3248         /* exclude internal tx bytes
3249          * Workaround: I40E_GLV_GOTC[H/L] may be updated before
3250          * I40E_GLPRT_GOTC[H/L], leaving a small window that can cause a
3251          * negative value. The same applies to I40E_GLV_UPTC[H/L],
3252          * I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
3253          */
3254         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
3255                 ns->eth.tx_bytes = 0;
3256         else
3257                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
3258
3259         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
3260                 ns->eth.tx_unicast = 0;
3261         else
3262                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
3263
3264         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
3265                 ns->eth.tx_multicast = 0;
3266         else
3267                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
3268
3269         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
3270                 ns->eth.tx_broadcast = 0;
3271         else
3272                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
3273
3274         /* GLPRT_TEPC not supported */
3275
3276         /* additional port specific stats */
3277         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3278                             pf->offset_loaded, &os->tx_dropped_link_down,
3279                             &ns->tx_dropped_link_down);
3280         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3281                             pf->offset_loaded, &os->crc_errors,
3282                             &ns->crc_errors);
3283         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3284                             pf->offset_loaded, &os->illegal_bytes,
3285                             &ns->illegal_bytes);
3286         /* GLPRT_ERRBC not supported */
3287         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3288                             pf->offset_loaded, &os->mac_local_faults,
3289                             &ns->mac_local_faults);
3290         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3291                             pf->offset_loaded, &os->mac_remote_faults,
3292                             &ns->mac_remote_faults);
3293         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3294                             pf->offset_loaded, &os->rx_length_errors,
3295                             &ns->rx_length_errors);
3296         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3297                             pf->offset_loaded, &os->link_xon_rx,
3298                             &ns->link_xon_rx);
3299         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3300                             pf->offset_loaded, &os->link_xoff_rx,
3301                             &ns->link_xoff_rx);
3302         for (i = 0; i < 8; i++) {
3303                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3304                                     pf->offset_loaded,
3305                                     &os->priority_xon_rx[i],
3306                                     &ns->priority_xon_rx[i]);
3307                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3308                                     pf->offset_loaded,
3309                                     &os->priority_xoff_rx[i],
3310                                     &ns->priority_xoff_rx[i]);
3311         }
3312         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3313                             pf->offset_loaded, &os->link_xon_tx,
3314                             &ns->link_xon_tx);
3315         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3316                             pf->offset_loaded, &os->link_xoff_tx,
3317                             &ns->link_xoff_tx);
3318         for (i = 0; i < 8; i++) {
3319                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3320                                     pf->offset_loaded,
3321                                     &os->priority_xon_tx[i],
3322                                     &ns->priority_xon_tx[i]);
3323                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3324                                     pf->offset_loaded,
3325                                     &os->priority_xoff_tx[i],
3326                                     &ns->priority_xoff_tx[i]);
3327                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3328                                     pf->offset_loaded,
3329                                     &os->priority_xon_2_xoff[i],
3330                                     &ns->priority_xon_2_xoff[i]);
3331         }
3332         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3333                             I40E_GLPRT_PRC64L(hw->port),
3334                             pf->offset_loaded, &os->rx_size_64,
3335                             &ns->rx_size_64);
3336         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3337                             I40E_GLPRT_PRC127L(hw->port),
3338                             pf->offset_loaded, &os->rx_size_127,
3339                             &ns->rx_size_127);
3340         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3341                             I40E_GLPRT_PRC255L(hw->port),
3342                             pf->offset_loaded, &os->rx_size_255,
3343                             &ns->rx_size_255);
3344         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3345                             I40E_GLPRT_PRC511L(hw->port),
3346                             pf->offset_loaded, &os->rx_size_511,
3347                             &ns->rx_size_511);
3348         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3349                             I40E_GLPRT_PRC1023L(hw->port),
3350                             pf->offset_loaded, &os->rx_size_1023,
3351                             &ns->rx_size_1023);
3352         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3353                             I40E_GLPRT_PRC1522L(hw->port),
3354                             pf->offset_loaded, &os->rx_size_1522,
3355                             &ns->rx_size_1522);
3356         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3357                             I40E_GLPRT_PRC9522L(hw->port),
3358                             pf->offset_loaded, &os->rx_size_big,
3359                             &ns->rx_size_big);
3360         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3361                             pf->offset_loaded, &os->rx_undersize,
3362                             &ns->rx_undersize);
3363         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3364                             pf->offset_loaded, &os->rx_fragments,
3365                             &ns->rx_fragments);
3366         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3367                             pf->offset_loaded, &os->rx_oversize,
3368                             &ns->rx_oversize);
3369         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3370                             pf->offset_loaded, &os->rx_jabber,
3371                             &ns->rx_jabber);
3372         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3373                             I40E_GLPRT_PTC64L(hw->port),
3374                             pf->offset_loaded, &os->tx_size_64,
3375                             &ns->tx_size_64);
3376         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3377                             I40E_GLPRT_PTC127L(hw->port),
3378                             pf->offset_loaded, &os->tx_size_127,
3379                             &ns->tx_size_127);
3380         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3381                             I40E_GLPRT_PTC255L(hw->port),
3382                             pf->offset_loaded, &os->tx_size_255,
3383                             &ns->tx_size_255);
3384         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3385                             I40E_GLPRT_PTC511L(hw->port),
3386                             pf->offset_loaded, &os->tx_size_511,
3387                             &ns->tx_size_511);
3388         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3389                             I40E_GLPRT_PTC1023L(hw->port),
3390                             pf->offset_loaded, &os->tx_size_1023,
3391                             &ns->tx_size_1023);
3392         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3393                             I40E_GLPRT_PTC1522L(hw->port),
3394                             pf->offset_loaded, &os->tx_size_1522,
3395                             &ns->tx_size_1522);
3396         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3397                             I40E_GLPRT_PTC9522L(hw->port),
3398                             pf->offset_loaded, &os->tx_size_big,
3399                             &ns->tx_size_big);
3400         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3401                            pf->offset_loaded,
3402                            &os->fd_sb_match, &ns->fd_sb_match);
3403         /* GLPRT_MSPDC not supported */
3404         /* GLPRT_XEC not supported */
3405
3406         pf->offset_loaded = true;
3407
3408         if (pf->main_vsi)
3409                 i40e_update_vsi_stats(pf->main_vsi);
3410 }
3411
3412 /* Get all statistics of a port */
3413 static int
3414 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3415 {
3416         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3417         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3418         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3419         struct i40e_vsi *vsi;
3420         unsigned i;
3421
3422         /* read the hardware registers to update the values, then fill the stats struct */
3423         i40e_read_stats_registers(pf, hw);
3424
3425         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3426                         pf->main_vsi->eth_stats.rx_multicast +
3427                         pf->main_vsi->eth_stats.rx_broadcast -
3428                         pf->main_vsi->eth_stats.rx_discards;
3429         stats->opackets = ns->eth.tx_unicast +
3430                         ns->eth.tx_multicast +
3431                         ns->eth.tx_broadcast;
3432         stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3433         stats->obytes   = ns->eth.tx_bytes;
3434         stats->oerrors  = ns->eth.tx_errors +
3435                         pf->main_vsi->eth_stats.tx_errors;
3436
3437         /* Rx Errors */
3438         stats->imissed  = ns->eth.rx_discards +
3439                         pf->main_vsi->eth_stats.rx_discards;
3440         stats->ierrors  = ns->crc_errors +
3441                         ns->rx_length_errors + ns->rx_undersize +
3442                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3443
3444         if (pf->vfs) {
3445                 for (i = 0; i < pf->vf_num; i++) {
3446                         vsi = pf->vfs[i].vsi;
3447                         i40e_update_vsi_stats(vsi);
3448
3449                         stats->ipackets += (vsi->eth_stats.rx_unicast +
3450                                         vsi->eth_stats.rx_multicast +
3451                                         vsi->eth_stats.rx_broadcast -
3452                                         vsi->eth_stats.rx_discards);
3453                         stats->ibytes   += vsi->eth_stats.rx_bytes;
3454                         stats->oerrors  += vsi->eth_stats.tx_errors;
3455                         stats->imissed  += vsi->eth_stats.rx_discards;
3456                 }
3457         }
3458
3459         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3460         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3461         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3462         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3463         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3464         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3465         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3466                     ns->eth.rx_unknown_protocol);
3467         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3468         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3469         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3470         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3471         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3472         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3473
3474         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3475                     ns->tx_dropped_link_down);
3476         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3477         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3478                     ns->illegal_bytes);
3479         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3480         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3481                     ns->mac_local_faults);
3482         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3483                     ns->mac_remote_faults);
3484         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3485                     ns->rx_length_errors);
3486         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3487         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3488         for (i = 0; i < 8; i++) {
3489                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3490                                 i, ns->priority_xon_rx[i]);
3491                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3492                                 i, ns->priority_xoff_rx[i]);
3493         }
3494         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3495         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3496         for (i = 0; i < 8; i++) {
3497                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3498                                 i, ns->priority_xon_tx[i]);
3499                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3500                                 i, ns->priority_xoff_tx[i]);
3501                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3502                                 i, ns->priority_xon_2_xoff[i]);
3503         }
3504         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3505         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3506         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3507         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3508         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3509         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3510         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3511         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3512         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3513         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3514         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3515         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3516         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3517         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3518         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3519         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3520         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3521         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3522         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3523                         ns->mac_short_packet_dropped);
3524         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3525                     ns->checksum_error);
3526         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3527         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3528         return 0;
3529 }
3530
3531 /* Reset the statistics */
3532 static int
3533 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3534 {
3535         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3536         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3537
3538         /* Mark PF and VSI stats to update the offset, aka "reset" */
3539         pf->offset_loaded = false;
3540         if (pf->main_vsi)
3541                 pf->main_vsi->offset_loaded = false;
3542
3543         /* read the stats; the current register values become the new offset */
3544         i40e_read_stats_registers(pf, hw);
3545
3546         return 0;
3547 }
3548
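/*
 * Total xstats count: the basic ethernet stats, the per-port hardware
 * stats, and one entry per priority (0-7) for each Rx and Tx priority
 * stat. i40e_dev_xstats_get_names() and i40e_dev_xstats_get() must emit
 * entries in exactly this order.
 */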
3549 static uint32_t
3550 i40e_xstats_calc_num(void)
3551 {
3552         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3553                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3554                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3555 }
3556
3557 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3558                                      struct rte_eth_xstat_name *xstats_names,
3559                                      __rte_unused unsigned limit)
3560 {
3561         unsigned count = 0;
3562         unsigned i, prio;
3563
3564         if (xstats_names == NULL)
3565                 return i40e_xstats_calc_num();
3566
3567         /* Note: limit checked in rte_eth_xstats_get_names() */
3568
3569         /* Get stats from i40e_eth_stats struct */
3570         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3571                 strlcpy(xstats_names[count].name,
3572                         rte_i40e_stats_strings[i].name,
3573                         sizeof(xstats_names[count].name));
3574                 count++;
3575         }
3576
3577         /* Get individual stats from the i40e_hw_port_stats struct */
3578         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3579                 strlcpy(xstats_names[count].name,
3580                         rte_i40e_hw_port_strings[i].name,
3581                         sizeof(xstats_names[count].name));
3582                 count++;
3583         }
3584
3585         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3586                 for (prio = 0; prio < 8; prio++) {
3587                         snprintf(xstats_names[count].name,
3588                                  sizeof(xstats_names[count].name),
3589                                  "rx_priority%u_%s", prio,
3590                                  rte_i40e_rxq_prio_strings[i].name);
3591                         count++;
3592                 }
3593         }
3594
3595         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3596                 for (prio = 0; prio < 8; prio++) {
3597                         snprintf(xstats_names[count].name,
3598                                  sizeof(xstats_names[count].name),
3599                                  "tx_priority%u_%s", prio,
3600                                  rte_i40e_txq_prio_strings[i].name);
3601                         count++;
3602                 }
3603         }
3604         return count;
3605 }
3606
3607 static int
3608 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3609                     unsigned n)
3610 {
3611         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3612         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3613         unsigned i, count, prio;
3614         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3615
3616         count = i40e_xstats_calc_num();
3617         if (n < count)
3618                 return count;
3619
3620         i40e_read_stats_registers(pf, hw);
3621
3622         if (xstats == NULL)
3623                 return 0;
3624
3625         count = 0;
3626
3627         /* Get stats from i40e_eth_stats struct */
3628         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3629                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3630                         rte_i40e_stats_strings[i].offset);
3631                 xstats[count].id = count;
3632                 count++;
3633         }
3634
3635         /* Get individual stats from the i40e_hw_port_stats struct */
3636         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3637                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3638                         rte_i40e_hw_port_strings[i].offset);
3639                 xstats[count].id = count;
3640                 count++;
3641         }
3642
3643         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3644                 for (prio = 0; prio < 8; prio++) {
3645                         xstats[count].value =
3646                                 *(uint64_t *)(((char *)hw_stats) +
3647                                 rte_i40e_rxq_prio_strings[i].offset +
3648                                 (sizeof(uint64_t) * prio));
3649                         xstats[count].id = count;
3650                         count++;
3651                 }
3652         }
3653
3654         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3655                 for (prio = 0; prio < 8; prio++) {
3656                         xstats[count].value =
3657                                 *(uint64_t *)(((char *)hw_stats) +
3658                                 rte_i40e_txq_prio_strings[i].offset +
3659                                 (sizeof(uint64_t) * prio));
3660                         xstats[count].id = count;
3661                         count++;
3662                 }
3663         }
3664
3665         return count;
3666 }
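
/*
 * Application-side retrieval sketch (illustrative; port_id and the missing
 * error handling are assumptions of the example). The usual two-call
 * pattern: query the required count first, then fetch the values:
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	rte_eth_xstats_get(port_id, xs, n);
 */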
3667
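/*
 * hw->nvm.oem_ver packs the OEM version as: bits 31:24 = major version,
 * bits 23:8 = build number, bits 7:0 = patch level. It is unpacked below
 * and printed after the NVM version and eetrack id.
 */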
3668 static int
3669 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3670 {
3671         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3672         u32 full_ver;
3673         u8 ver, patch;
3674         u16 build;
3675         int ret;
3676
3677         full_ver = hw->nvm.oem_ver;
3678         ver = (u8)(full_ver >> 24);
3679         build = (u16)((full_ver >> 8) & 0xffff);
3680         patch = (u8)(full_ver & 0xff);
3681
3682         ret = snprintf(fw_version, fw_size,
3683                  "%d.%d%d 0x%08x %d.%d.%d",
3684                  ((hw->nvm.version >> 12) & 0xf),
3685                  ((hw->nvm.version >> 4) & 0xff),
3686                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3687                  ver, build, patch);
3688
3689         ret += 1; /* add the size of '\0' */
3690         if (fw_size < (u32)ret)
3691                 return ret;
3692         else
3693                 return 0;
3694 }
3695
3696 /*
3697  * When using NVM 6.01 (for X710/XL710/XXV710) or 3.33 (for X722) or later,
3698  * the Rx data path does not hang if the FW LLDP agent is stopped.
3699  * Return true if LLDP needs to be stopped;
3700  * return false if LLDP cannot be disabled without blocking the Rx data path.
3701  */
3702 static bool
3703 i40e_need_stop_lldp(struct rte_eth_dev *dev)
3704 {
3705         double nvm_ver;
3706         char ver_str[64] = {0};
3707         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3708
3709         i40e_fw_version_get(dev, ver_str, 64);
3710         nvm_ver = atof(ver_str);
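        /*
         * Compare versions as integers scaled by 1000 (e.g. 6.01 -> 6010)
         * rather than comparing doubles directly.
         */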
3711         if ((hw->mac.type == I40E_MAC_X722 ||
3712              hw->mac.type == I40E_MAC_X722_VF) &&
3713              ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3714                 return true;
3715         else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3716                 return true;
3717
3718         return false;
3719 }
3720
3721 static int
3722 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3723 {
3724         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3725         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3726         struct i40e_vsi *vsi = pf->main_vsi;
3727         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3728
3729         dev_info->max_rx_queues = vsi->nb_qps;
3730         dev_info->max_tx_queues = vsi->nb_qps;
3731         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3732         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3733         dev_info->max_mac_addrs = vsi->max_macaddrs;
3734         dev_info->max_vfs = pci_dev->max_vfs;
3735         dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
3736         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3737         dev_info->rx_queue_offload_capa = 0;
3738         dev_info->rx_offload_capa =
3739                 DEV_RX_OFFLOAD_VLAN_STRIP |
3740                 DEV_RX_OFFLOAD_QINQ_STRIP |
3741                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3742                 DEV_RX_OFFLOAD_UDP_CKSUM |
3743                 DEV_RX_OFFLOAD_TCP_CKSUM |
3744                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3745                 DEV_RX_OFFLOAD_KEEP_CRC |
3746                 DEV_RX_OFFLOAD_SCATTER |
3747                 DEV_RX_OFFLOAD_VLAN_EXTEND |
3748                 DEV_RX_OFFLOAD_VLAN_FILTER |
3749                 DEV_RX_OFFLOAD_JUMBO_FRAME |
3750                 DEV_RX_OFFLOAD_RSS_HASH;
3751
3752         dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3753         dev_info->tx_offload_capa =
3754                 DEV_TX_OFFLOAD_VLAN_INSERT |
3755                 DEV_TX_OFFLOAD_QINQ_INSERT |
3756                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3757                 DEV_TX_OFFLOAD_UDP_CKSUM |
3758                 DEV_TX_OFFLOAD_TCP_CKSUM |
3759                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3760                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3761                 DEV_TX_OFFLOAD_TCP_TSO |
3762                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3763                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3764                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3765                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3766                 DEV_TX_OFFLOAD_MULTI_SEGS |
3767                 dev_info->tx_queue_offload_capa;
3768         dev_info->dev_capa =
3769                 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3770                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3771
3772         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3773                                                 sizeof(uint32_t);
3774         dev_info->reta_size = pf->hash_lut_size;
3775         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3776
3777         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3778                 .rx_thresh = {
3779                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3780                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3781                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3782                 },
3783                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3784                 .rx_drop_en = 0,
3785                 .offloads = 0,
3786         };
3787
3788         dev_info->default_txconf = (struct rte_eth_txconf) {
3789                 .tx_thresh = {
3790                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3791                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3792                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3793                 },
3794                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3795                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3796                 .offloads = 0,
3797         };
3798
3799         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3800                 .nb_max = I40E_MAX_RING_DESC,
3801                 .nb_min = I40E_MIN_RING_DESC,
3802                 .nb_align = I40E_ALIGN_RING_DESC,
3803         };
3804
3805         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3806                 .nb_max = I40E_MAX_RING_DESC,
3807                 .nb_min = I40E_MIN_RING_DESC,
3808                 .nb_align = I40E_ALIGN_RING_DESC,
3809                 .nb_seg_max = I40E_TX_MAX_SEG,
3810                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3811         };
3812
3813         if (pf->flags & I40E_FLAG_VMDQ) {
3814                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3815                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3816                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3817                                                 pf->max_nb_vmdq_vsi;
3818                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3819                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3820                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3821         }
3822
3823         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3824                 /* For XL710 */
3825                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3826                 dev_info->default_rxportconf.nb_queues = 2;
3827                 dev_info->default_txportconf.nb_queues = 2;
3828                 if (dev->data->nb_rx_queues == 1)
3829                         dev_info->default_rxportconf.ring_size = 2048;
3830                 else
3831                         dev_info->default_rxportconf.ring_size = 1024;
3832                 if (dev->data->nb_tx_queues == 1)
3833                         dev_info->default_txportconf.ring_size = 1024;
3834                 else
3835                         dev_info->default_txportconf.ring_size = 512;
3836
3837         } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3838                 /* For XXV710 */
3839                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3840                 dev_info->default_rxportconf.nb_queues = 1;
3841                 dev_info->default_txportconf.nb_queues = 1;
3842                 dev_info->default_rxportconf.ring_size = 256;
3843                 dev_info->default_txportconf.ring_size = 256;
3844         } else {
3845                 /* For X710 */
3846                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3847                 dev_info->default_rxportconf.nb_queues = 1;
3848                 dev_info->default_txportconf.nb_queues = 1;
3849                 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3850                         dev_info->default_rxportconf.ring_size = 512;
3851                         dev_info->default_txportconf.ring_size = 256;
3852                 } else {
3853                         dev_info->default_rxportconf.ring_size = 256;
3854                         dev_info->default_txportconf.ring_size = 256;
3855                 }
3856         }
3857         dev_info->default_rxportconf.burst_size = 32;
3858         dev_info->default_txportconf.burst_size = 32;
3859
3860         return 0;
3861 }
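/*
 * Usage sketch (hypothetical application-side code): the preferred ring
 * and burst sizes filled in above are hints an application can read back
 * through rte_eth_dev_info_get() before setting up queues. "port_id" is
 * assumed valid.
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t nb_rxd, nb_txd;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *		nb_rxd = info.default_rxportconf.ring_size;
 *		nb_txd = info.default_txportconf.ring_size;
 *		rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *		(pass nb_rxd/nb_txd to the Rx/Tx queue setup calls)
 *	}
 */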
3862
3863 static int
3864 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3865 {
3866         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3867         struct i40e_vsi *vsi = pf->main_vsi;
3868         PMD_INIT_FUNC_TRACE();
3869
3870         if (on)
3871                 return i40e_vsi_add_vlan(vsi, vlan_id);
3872         else
3873                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3874 }
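/*
 * Usage sketch (hypothetical): this callback is reached through
 * rte_eth_dev_vlan_filter(); DEV_RX_OFFLOAD_VLAN_FILTER must be enabled
 * on the port for the filter to have any effect.
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);	accept VLAN 100
 *	rte_eth_dev_vlan_filter(port_id, 100, 0);	drop it again
 */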
3875
3876 static int
3877 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3878                                 enum rte_vlan_type vlan_type,
3879                                 uint16_t tpid, int qinq)
3880 {
3881         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3882         uint64_t reg_r = 0;
3883         uint64_t reg_w = 0;
3884         uint16_t reg_id = 3;
3885         int ret;
3886
3887         if (qinq) {
3888                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3889                         reg_id = 2;
3890         }
3891
3892         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3893                                           &reg_r, NULL);
3894         if (ret != I40E_SUCCESS) {
3895                 PMD_DRV_LOG(ERR,
3896                            "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3897                            reg_id);
3898                 return -EIO;
3899         }
3900         PMD_DRV_LOG(DEBUG,
3901                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3902                     reg_id, reg_r);
3903
3904         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3905         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3906         if (reg_r == reg_w) {
3907                 PMD_DRV_LOG(DEBUG, "No need to write");
3908                 return 0;
3909         }
3910
3911         ret = i40e_aq_debug_write_global_register(hw,
3912                                            I40E_GL_SWT_L2TAGCTRL(reg_id),
3913                                            reg_w, NULL);
3914         if (ret != I40E_SUCCESS) {
3915                 PMD_DRV_LOG(ERR,
3916                             "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3917                             reg_id);
3918                 return -EIO;
3919         }
3920         PMD_DRV_LOG(DEBUG,
3921                     "Global register 0x%08x is changed with value 0x%08x",
3922                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3923
3924         return 0;
3925 }
3926
3927 static int
3928 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3929                    enum rte_vlan_type vlan_type,
3930                    uint16_t tpid)
3931 {
3932         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3933         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3934         int qinq = dev->data->dev_conf.rxmode.offloads &
3935                    DEV_RX_OFFLOAD_VLAN_EXTEND;
3936         int ret = 0;
3937
3938         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3939              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3940             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3941                 PMD_DRV_LOG(ERR,
3942                             "Unsupported vlan type.");
3943                 return -EINVAL;
3944         }
3945
3946         if (pf->support_multi_driver) {
3947                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3948                 return -ENOTSUP;
3949         }
3950
3951         /* Support for 802.1ad frames was added in NVM API 1.7 */
3952         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3953                 if (qinq) {
3954                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3955                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3956                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3957                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3958                 } else {
3959                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3960                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3961                 }
3962                 ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3963                 if (ret != I40E_SUCCESS) {
3964                         PMD_DRV_LOG(ERR,
3965                                     "Set switch config failed aq_err: %d",
3966                                     hw->aq.asq_last_status);
3967                         ret = -EIO;
3968                 }
3969         } else
3970                 /* If NVM API < 1.7, keep the register setting */
3971                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3972                                                       tpid, qinq);
3973
3974         return ret;
3975 }
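/*
 * Usage sketch (hypothetical): with DEV_RX_OFFLOAD_VLAN_EXTEND (QinQ)
 * enabled, an application can program an 802.1ad outer TPID and keep the
 * standard 802.1Q inner TPID through rte_eth_dev_set_vlan_ether_type(),
 * which lands in this callback.
 *
 *	rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER, 0x88A8);
 *	rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_INNER, 0x8100);
 */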
3976
3977 /* Configure outer vlan stripping on or off in QinQ mode */
3978 static int
3979 i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
3980 {
3981         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3982         int ret = I40E_SUCCESS;
3983         uint32_t reg;
3984
3985         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
3986                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
3987                 return -EINVAL;
3988         }
3989
3990         /* Configure for outer VLAN RX stripping */
3991         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
3992
3993         if (on)
3994                 reg |= I40E_VSI_TSR_QINQ_STRIP;
3995         else
3996                 reg &= ~I40E_VSI_TSR_QINQ_STRIP;
3997
3998         ret = i40e_aq_debug_write_register(hw,
3999                                                    I40E_VSI_TSR(vsi->vsi_id),
4000                                                    reg, NULL);
4001         if (ret < 0) {
4002                 PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
4003                                     vsi->vsi_id);
4004                 return I40E_ERR_CONFIG;
4005         }
4006
4007         return ret;
4008 }
4009
4010 static int
4011 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4012 {
4013         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4014         struct i40e_vsi *vsi = pf->main_vsi;
4015         struct rte_eth_rxmode *rxmode;
4016
4017         rxmode = &dev->data->dev_conf.rxmode;
4018         if (mask & ETH_VLAN_FILTER_MASK) {
4019                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4020                         i40e_vsi_config_vlan_filter(vsi, TRUE);
4021                 else
4022                         i40e_vsi_config_vlan_filter(vsi, FALSE);
4023         }
4024
4025         if (mask & ETH_VLAN_STRIP_MASK) {
4026                 /* Enable or disable VLAN stripping */
4027                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4028                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
4029                 else
4030                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
4031         }
4032
4033         if (mask & ETH_VLAN_EXTEND_MASK) {
4034                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
4035                         i40e_vsi_config_double_vlan(vsi, TRUE);
4036                         /* Set global registers with default ethertype. */
4037                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
4038                                            RTE_ETHER_TYPE_VLAN);
4039                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
4040                                            RTE_ETHER_TYPE_VLAN);
4041                 }
4042                 else
4043                         i40e_vsi_config_double_vlan(vsi, FALSE);
4044         }
4045
4046         if (mask & ETH_QINQ_STRIP_MASK) {
4047                 /* Enable or disable outer VLAN stripping */
4048                 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
4049                         i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
4050                 else
4051                         i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
4052         }
4053
4054         return 0;
4055 }
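/*
 * Usage sketch (hypothetical): applications normally toggle these
 * features at runtime with rte_eth_dev_set_vlan_offload(); the ethdev
 * layer updates dev_conf.rxmode.offloads and passes the mask of changed
 * bits down to this callback.
 *
 *	int flags = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	flags |= ETH_VLAN_STRIP_OFFLOAD | ETH_QINQ_STRIP_OFFLOAD;
 *	rte_eth_dev_set_vlan_offload(port_id, flags);
 */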
4056
4057 static void
4058 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
4059                           __rte_unused uint16_t queue,
4060                           __rte_unused int on)
4061 {
4062         PMD_INIT_FUNC_TRACE();
4063 }
4064
4065 static int
4066 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4067 {
4068         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4069         struct i40e_vsi *vsi = pf->main_vsi;
4070         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
4071         struct i40e_vsi_vlan_pvid_info info;
4072
4073         memset(&info, 0, sizeof(info));
4074         info.on = on;
4075         if (info.on)
4076                 info.config.pvid = pvid;
4077         else {
4078                 info.config.reject.tagged =
4079                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
4080                 info.config.reject.untagged =
4081                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
4082         }
4083
4084         return i40e_vsi_vlan_pvid_set(vsi, &info);
4085 }
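/*
 * Usage sketch (hypothetical PVID value): enabling port-based VLAN
 * insertion via rte_eth_dev_set_vlan_pvid(). When disabling, the reject
 * flags are taken from dev_conf.txmode as coded above.
 *
 *	rte_eth_dev_set_vlan_pvid(port_id, 42, 1);	insert PVID 42 on Tx
 *	rte_eth_dev_set_vlan_pvid(port_id, 0, 0);	disable PVID again
 */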
4086
4087 static int
4088 i40e_dev_led_on(struct rte_eth_dev *dev)
4089 {
4090         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4091         uint32_t mode = i40e_led_get(hw);
4092
4093         if (mode == 0)
4094                 i40e_led_set(hw, 0xf, true); /* 0xf means led always true */
4095
4096         return 0;
4097 }
4098
4099 static int
4100 i40e_dev_led_off(struct rte_eth_dev *dev)
4101 {
4102         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4103         uint32_t mode = i40e_led_get(hw);
4104
4105         if (mode != 0)
4106                 i40e_led_set(hw, 0, false);
4107
4108         return 0;
4109 }
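/*
 * Usage sketch (hypothetical): blinking the port LED from an application
 * to identify the physical port, e.g. during cabling checks.
 *
 *	rte_eth_led_on(port_id);
 *	rte_delay_ms(3000);
 *	rte_eth_led_off(port_id);
 */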
4110
4111 static int
4112 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4113 {
4114         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4115         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4116
4117         fc_conf->pause_time = pf->fc_conf.pause_time;
4118
4119         /* read from the registers, in case they were modified by another port */
4120         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
4121                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
4122         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
4123                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
4124
4125         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
4126         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
4127
4128         /* Return the current mode according to the actual setting */
4129         switch (hw->fc.current_mode) {
4130         case I40E_FC_FULL:
4131                 fc_conf->mode = RTE_FC_FULL;
4132                 break;
4133         case I40E_FC_TX_PAUSE:
4134                 fc_conf->mode = RTE_FC_TX_PAUSE;
4135                 break;
4136         case I40E_FC_RX_PAUSE:
4137                 fc_conf->mode = RTE_FC_RX_PAUSE;
4138                 break;
4139         case I40E_FC_NONE:
4140         default:
4141                 fc_conf->mode = RTE_FC_NONE;
4142         }
4143
4144         return 0;
4145 }
4146
4147 static int
4148 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4149 {
4150         uint32_t mflcn_reg, fctrl_reg, reg;
4151         uint32_t max_high_water;
4152         uint8_t i, aq_failure;
4153         int err;
4154         struct i40e_hw *hw;
4155         struct i40e_pf *pf;
4156         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
4157                 [RTE_FC_NONE] = I40E_FC_NONE,
4158                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
4159                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
4160                 [RTE_FC_FULL] = I40E_FC_FULL
4161         };
4162
4163         /* The high_water field in rte_eth_fc_conf is expressed in kilobytes */
4164
4165         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4166         if ((fc_conf->high_water > max_high_water) ||
4167                         (fc_conf->high_water < fc_conf->low_water)) {
4168                 PMD_INIT_LOG(ERR,
4169                         "Invalid high/low water setup value in KB, High_water must be <= %d.",
4170                         max_high_water);
4171                 return -EINVAL;
4172         }
4173
4174         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4175         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4176         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4177
4178         pf->fc_conf.pause_time = fc_conf->pause_time;
4179         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4180         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4181
4182         PMD_INIT_FUNC_TRACE();
4183
4184         /* All the link flow control related enable/disable register
4185          * configuration is handled by the firmware
4186          */
4187         err = i40e_set_fc(hw, &aq_failure, true);
4188         if (err < 0)
4189                 return -ENOSYS;
4190
4191         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4192                 /* Configure flow control refresh threshold,
4193                  * the value for stat_tx_pause_refresh_timer[8]
4194                  * is used for global pause operation.
4195                  */
4196
4197                 I40E_WRITE_REG(hw,
4198                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4199                                pf->fc_conf.pause_time);
4200
4201                 /* configure the timer value included in transmitted pause
4202                  * frame,
4203                  * the value for stat_tx_pause_quanta[8] is used for global
4204                  * pause operation
4205                  */
4206                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4207                                pf->fc_conf.pause_time);
4208
4209                 fctrl_reg = I40E_READ_REG(hw,
4210                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4211
4212                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4213                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4214                 else
4215                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4216
4217                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4218                                fctrl_reg);
4219         } else {
4220                 /* Configure pause time (2 TCs per register) */
4221                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4222                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4223                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4224
4225                 /* Configure flow control refresh threshold value */
4226                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4227                                pf->fc_conf.pause_time / 2);
4228
4229                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4230
4231                 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
4232                  * depending on the configuration
4233                  */
4234                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
4235                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4236                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4237                 } else {
4238                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4239                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4240                 }
4241
4242                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4243         }
4244
4245         if (!pf->support_multi_driver) {
4246                 /* configure watermarks based on both packets and bytes */
4247                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4248                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4249                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4250                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4251                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4252                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4253                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4254                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4255                                   << I40E_KILOSHIFT);
4256                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4257                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4258                                    << I40E_KILOSHIFT);
4259         } else {
4260                 PMD_DRV_LOG(ERR,
4261                             "Watermark configuration is not supported.");
4262         }
4263
4264         I40E_WRITE_FLUSH(hw);
4265
4266         return 0;
4267 }
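/*
 * Usage sketch (hypothetical values): requesting full link flow control
 * through rte_eth_dev_flow_ctrl_set(). high_water/low_water are in
 * kilobytes and must satisfy the check above (high_water no more than
 * I40E_RXPBSIZE >> I40E_KILOSHIFT and not below low_water).
 *
 *	struct rte_eth_fc_conf fc = {
 *		.mode = RTE_FC_FULL,
 *		.high_water = 0x80,
 *		.low_water = 0x40,
 *		.pause_time = 0xFFFF,
 *		.mac_ctrl_frame_fwd = 0,
 *	};
 *
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */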
4268
4269 static int
4270 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
4271                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
4272 {
4273         PMD_INIT_FUNC_TRACE();
4274
4275         return -ENOSYS;
4276 }
4277
4278 /* Add a MAC address, and update filters */
4279 static int
4280 i40e_macaddr_add(struct rte_eth_dev *dev,
4281                  struct rte_ether_addr *mac_addr,
4282                  __rte_unused uint32_t index,
4283                  uint32_t pool)
4284 {
4285         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4286         struct i40e_mac_filter_info mac_filter;
4287         struct i40e_vsi *vsi;
4288         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4289         int ret;
4290
4291         /* If VMDQ not enabled or configured, return */
4292         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4293                           !pf->nb_cfg_vmdq_vsi)) {
4294                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4295                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4296                         pool);
4297                 return -ENOTSUP;
4298         }
4299
4300         if (pool > pf->nb_cfg_vmdq_vsi) {
4301                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4302                                 pool, pf->nb_cfg_vmdq_vsi);
4303                 return -EINVAL;
4304         }
4305
4306         rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4307         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4308                 mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
4309         else
4310                 mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
4311
4312         if (pool == 0)
4313                 vsi = pf->main_vsi;
4314         else
4315                 vsi = pf->vmdq[pool - 1].vsi;
4316
4317         ret = i40e_vsi_add_mac(vsi, &mac_filter);
4318         if (ret != I40E_SUCCESS) {
4319                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4320                 return -ENODEV;
4321         }
4322         return 0;
4323 }
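/*
 * Usage sketch (hypothetical, locally administered MAC made up for the
 * example): adding a secondary unicast address to VMDq pool 1 through
 * rte_eth_dev_mac_addr_add(); pool 0 targets the main VSI.
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	rte_eth_dev_mac_addr_add(port_id, &mac, 1);
 */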
4324
4325 /* Remove a MAC address, and update filters */
4326 static void
4327 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4328 {
4329         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4330         struct i40e_vsi *vsi;
4331         struct rte_eth_dev_data *data = dev->data;
4332         struct rte_ether_addr *macaddr;
4333         int ret;
4334         uint32_t i;
4335         uint64_t pool_sel;
4336
4337         macaddr = &(data->mac_addrs[index]);
4338
4339         pool_sel = dev->data->mac_pool_sel[index];
4340
4341         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4342                 if (pool_sel & (1ULL << i)) {
4343                         if (i == 0)
4344                                 vsi = pf->main_vsi;
4345                         else {
4346                                 /* No VMDQ pool enabled or configured */
4347                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
4348                                         (i > pf->nb_cfg_vmdq_vsi)) {
4349                                         PMD_DRV_LOG(ERR,
4350                                                 "No VMDQ pool enabled/configured");
4351                                         return;
4352                                 }
4353                                 vsi = pf->vmdq[i - 1].vsi;
4354                         }
4355                         ret = i40e_vsi_delete_mac(vsi, macaddr);
4356
4357                         if (ret) {
4358                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4359                                 return;
4360                         }
4361                 }
4362         }
4363 }
4364
4365 static int
4366 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4367 {
4368         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4369         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4370         uint32_t reg;
4371         int ret;
4372
4373         if (!lut)
4374                 return -EINVAL;
4375
4376         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4377                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4378                                           vsi->type != I40E_VSI_SRIOV,
4379                                           lut, lut_size);
4380                 if (ret) {
4381                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4382                         return ret;
4383                 }
4384         } else {
4385                 uint32_t *lut_dw = (uint32_t *)lut;
4386                 uint16_t i, lut_size_dw = lut_size / 4;
4387
4388                 if (vsi->type == I40E_VSI_SRIOV) {
4389                         for (i = 0; i < lut_size_dw; i++) {
4390                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4391                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4392                         }
4393                 } else {
4394                         for (i = 0; i < lut_size_dw; i++)
4395                                 lut_dw[i] = I40E_READ_REG(hw,
4396                                                           I40E_PFQF_HLUT(i));
4397                 }
4398         }
4399
4400         return 0;
4401 }
4402
4403 int
4404 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4405 {
4406         struct i40e_pf *pf;
4407         struct i40e_hw *hw;
4408
4409         if (!vsi || !lut)
4410                 return -EINVAL;
4411
4412         pf = I40E_VSI_TO_PF(vsi);
4413         hw = I40E_VSI_TO_HW(vsi);
4414
4415         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4416                 enum i40e_status_code status;
4417
4418                 status = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4419                                              vsi->type != I40E_VSI_SRIOV,
4420                                              lut, lut_size);
4421                 if (status) {
4422                         PMD_DRV_LOG(ERR,
4423                                     "Failed to update RSS lookup table, error status: %d",
4424                                     status);
4425                         return -EIO;
4426                 }
4427         } else {
4428                 uint32_t *lut_dw = (uint32_t *)lut;
4429                 uint16_t i, lut_size_dw = lut_size / 4;
4430
4431                 if (vsi->type == I40E_VSI_SRIOV) {
4432                         for (i = 0; i < lut_size_dw; i++)
4433                                 I40E_WRITE_REG(
4434                                         hw,
4435                                         I40E_VFQF_HLUT1(i, vsi->user_param),
4436                                         lut_dw[i]);
4437                 } else {
4438                         for (i = 0; i < lut_size_dw; i++)
4439                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4440                                                lut_dw[i]);
4441                 }
4442                 I40E_WRITE_FLUSH(hw);
4443         }
4444
4445         return 0;
4446 }
4447
4448 static int
4449 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4450                          struct rte_eth_rss_reta_entry64 *reta_conf,
4451                          uint16_t reta_size)
4452 {
4453         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4454         uint16_t i, lut_size = pf->hash_lut_size;
4455         uint16_t idx, shift;
4456         uint8_t *lut;
4457         int ret;
4458
4459         if (reta_size != lut_size ||
4460                 reta_size > ETH_RSS_RETA_SIZE_512) {
4461                 PMD_DRV_LOG(ERR,
4462                         "The size of the configured hash lookup table (%d) doesn't match what the hardware supports (%d)",
4463                         reta_size, lut_size);
4464                 return -EINVAL;
4465         }
4466
4467         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4468         if (!lut) {
4469                 PMD_DRV_LOG(ERR, "No memory can be allocated");
4470                 return -ENOMEM;
4471         }
4472         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4473         if (ret)
4474                 goto out;
4475         for (i = 0; i < reta_size; i++) {
4476                 idx = i / RTE_RETA_GROUP_SIZE;
4477                 shift = i % RTE_RETA_GROUP_SIZE;
4478                 if (reta_conf[idx].mask & (1ULL << shift))
4479                         lut[i] = reta_conf[idx].reta[shift];
4480         }
4481         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4482
4483         pf->adapter->rss_reta_updated = 1;
4484
4485 out:
4486         rte_free(lut);
4487
4488         return ret;
4489 }
4490
4491 static int
4492 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4493                         struct rte_eth_rss_reta_entry64 *reta_conf,
4494                         uint16_t reta_size)
4495 {
4496         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4497         uint16_t i, lut_size = pf->hash_lut_size;
4498         uint16_t idx, shift;
4499         uint8_t *lut;
4500         int ret;
4501
4502         if (reta_size != lut_size ||
4503                 reta_size > ETH_RSS_RETA_SIZE_512) {
4504                 PMD_DRV_LOG(ERR,
4505                         "The size of the configured hash lookup table (%d) doesn't match what the hardware supports (%d)",
4506                         reta_size, lut_size);
4507                 return -EINVAL;
4508         }
4509
4510         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4511         if (!lut) {
4512                 PMD_DRV_LOG(ERR, "No memory can be allocated");
4513                 return -ENOMEM;
4514         }
4515
4516         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4517         if (ret)
4518                 goto out;
4519         for (i = 0; i < reta_size; i++) {
4520                 idx = i / RTE_RETA_GROUP_SIZE;
4521                 shift = i % RTE_RETA_GROUP_SIZE;
4522                 if (reta_conf[idx].mask & (1ULL << shift))
4523                         reta_conf[idx].reta[shift] = lut[i];
4524         }
4525
4526 out:
4527         rte_free(lut);
4528
4529         return ret;
4530 }
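/*
 * Usage sketch (hypothetical): spreading a 512-entry redirection table
 * over the first four Rx queues. Each rte_eth_rss_reta_entry64 covers 64
 * entries and its mask selects which of them are written; reta_size must
 * equal the reta_size reported in dev_info (pf->hash_lut_size here).
 *
 *	struct rte_eth_rss_reta_entry64 reta[ETH_RSS_RETA_SIZE_512 /
 *					     RTE_RETA_GROUP_SIZE] = { 0 };
 *	uint16_t i;
 *
 *	for (i = 0; i < ETH_RSS_RETA_SIZE_512; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % 4;
 *	}
 *
 *	rte_eth_dev_rss_reta_update(port_id, reta, ETH_RSS_RETA_SIZE_512);
 */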
4531
4532 /**
4533  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4534  * @hw:   pointer to the HW structure
4535  * @mem:  pointer to mem struct to fill out
4536  * @size: size of memory requested
4537  * @alignment: what to align the allocation to
4538  **/
4539 enum i40e_status_code
4540 i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
4541                         struct i40e_dma_mem *mem,
4542                         u64 size,
4543                         u32 alignment)
4544 {
4545         const struct rte_memzone *mz = NULL;
4546         char z_name[RTE_MEMZONE_NAMESIZE];
4547
4548         if (!mem)
4549                 return I40E_ERR_PARAM;
4550
4551         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4552         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4553                         RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4554         if (!mz)
4555                 return I40E_ERR_NO_MEMORY;
4556
4557         mem->size = size;
4558         mem->va = mz->addr;
4559         mem->pa = mz->iova;
4560         mem->zone = (const void *)mz;
4561         PMD_DRV_LOG(DEBUG,
4562                 "memzone %s allocated with physical address: %"PRIu64,
4563                 mz->name, mem->pa);
4564
4565         return I40E_SUCCESS;
4566 }
4567
4568 /**
4569  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4570  * @hw:   pointer to the HW structure
4571  * @mem:  ptr to mem struct to free
4572  **/
4573 enum i40e_status_code
4574 i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
4575                     struct i40e_dma_mem *mem)
4576 {
4577         if (!mem)
4578                 return I40E_ERR_PARAM;
4579
4580         PMD_DRV_LOG(DEBUG,
4581                 "memzone %s to be freed with physical address: %"PRIu64,
4582                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4583         rte_memzone_free((const struct rte_memzone *)mem->zone);
4584         mem->zone = NULL;
4585         mem->va = NULL;
4586         mem->pa = (u64)0;
4587
4588         return I40E_SUCCESS;
4589 }
4590
4591 /**
4592  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4593  * @hw:   pointer to the HW structure
4594  * @mem:  pointer to mem struct to fill out
4595  * @size: size of memory requested
4596  **/
4597 enum i40e_status_code
4598 i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
4599                          struct i40e_virt_mem *mem,
4600                          u32 size)
4601 {
4602         if (!mem)
4603                 return I40E_ERR_PARAM;
4604
4605         mem->size = size;
4606         mem->va = rte_zmalloc("i40e", size, 0);
4607
4608         if (mem->va)
4609                 return I40E_SUCCESS;
4610         else
4611                 return I40E_ERR_NO_MEMORY;
4612 }
4613
4614 /**
4615  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4616  * @hw:   pointer to the HW structure
4617  * @mem:  pointer to mem struct to free
4618  **/
4619 enum i40e_status_code
4620 i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
4621                      struct i40e_virt_mem *mem)
4622 {
4623         if (!mem)
4624                 return I40E_ERR_PARAM;
4625
4626         rte_free(mem->va);
4627         mem->va = NULL;
4628
4629         return I40E_SUCCESS;
4630 }
4631
4632 void
4633 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4634 {
4635         rte_spinlock_init(&sp->spinlock);
4636 }
4637
4638 void
4639 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4640 {
4641         rte_spinlock_lock(&sp->spinlock);
4642 }
4643
4644 void
4645 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4646 {
4647         rte_spinlock_unlock(&sp->spinlock);
4648 }
4649
4650 void
4651 i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
4652 {
4653         return;
4654 }
4655
4656 /**
4657  * Get the hardware capabilities, which will be parsed
4658  * and saved into struct i40e_hw.
4659  */
4660 static int
4661 i40e_get_cap(struct i40e_hw *hw)
4662 {
4663         struct i40e_aqc_list_capabilities_element_resp *buf;
4664         uint16_t len, size = 0;
4665         int ret;
4666
4667         /* Allocate a buffer large enough to hold the response data temporarily */
4668         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4669                                                 I40E_MAX_CAP_ELE_NUM;
4670         buf = rte_zmalloc("i40e", len, 0);
4671         if (!buf) {
4672                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4673                 return I40E_ERR_NO_MEMORY;
4674         }
4675
4676         /* Get and parse the capabilities, then save them to hw */
4677         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4678                         i40e_aqc_opc_list_func_capabilities, NULL);
4679         if (ret != I40E_SUCCESS)
4680                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4681
4682         /* Free the temporary buffer after being used */
4683         rte_free(buf);
4684
4685         return ret;
4686 }
4687
4688 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4689
4690 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4691                 const char *value,
4692                 void *opaque)
4693 {
4694         struct i40e_pf *pf;
4695         unsigned long num;
4696         char *end;
4697
4698         pf = (struct i40e_pf *)opaque;
4699         RTE_SET_USED(key);
4700
4701         errno = 0;
4702         num = strtoul(value, &end, 0);
4703         if (errno != 0 || end == value || *end != 0) {
4704                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s, "
4705                             "keeping the current value = %hu", value, pf->vf_nb_qp_max);
4706                 return -(EINVAL);
4707         }
4708
4709         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4710                 pf->vf_nb_qp_max = (uint16_t)num;
4711         else
4712                 /* return 0 so that a later valid occurrence of the same argument still works */
4713                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu; it must be "
4714                             "a power of 2 and no greater than 16. Keeping the "
4715                             "current value = %hu", num, pf->vf_nb_qp_max);
4716
4717         return 0;
4718 }
4719
4720 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4721 {
4722         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4723         struct rte_kvargs *kvlist;
4724         int kvargs_count;
4725
4726         /* set default queue number per VF as 4 */
4727         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4728
4729         if (dev->device->devargs == NULL)
4730                 return 0;
4731
4732         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4733         if (kvlist == NULL)
4734                 return -(EINVAL);
4735
4736         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4737         if (!kvargs_count) {
4738                 rte_kvargs_free(kvlist);
4739                 return 0;
4740         }
4741
4742         if (kvargs_count > 1)
4743                 PMD_DRV_LOG(WARNING, "Argument \"%s\" was given more than once; "
4744                             "only the first invalid or the last valid value is used",
4745                             ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4746
4747         rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4748                            i40e_pf_parse_vf_queue_number_handler, pf);
4749
4750         rte_kvargs_free(kvlist);
4751
4752         return 0;
4753 }
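/*
 * Usage sketch (hypothetical PCI address): the devarg parsed above is
 * given on the EAL command line when allowing the device; values that
 * are not a power of two or exceed 16 are rejected by the handler and
 * the default of 4 queues per VF is kept.
 *
 *	./dpdk-app -a 0000:02:00.0,queue-num-per-vf=8 -- ...
 */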
4754
4755 static int
4756 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4757 {
4758         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4759         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4760         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4761         uint16_t qp_count = 0, vsi_count = 0;
4762
4763         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4764                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4765                 return -EINVAL;
4766         }
4767
4768         i40e_pf_config_vf_rxq_number(dev);
4769
4770         /* Initialize the link flow control (LFC) parameters */
4771         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4772         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4773         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4774
4775         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4776         pf->max_num_vsi = hw->func_caps.num_vsis;
4777         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4778         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4779
4780         /* FDir queue/VSI allocation */
4781         pf->fdir_qp_offset = 0;
4782         if (hw->func_caps.fd) {
4783                 pf->flags |= I40E_FLAG_FDIR;
4784                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4785         } else {
4786                 pf->fdir_nb_qps = 0;
4787         }
4788         qp_count += pf->fdir_nb_qps;
4789         vsi_count += 1;
4790
4791         /* LAN queue/VSI allocation */
4792         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4793         if (!hw->func_caps.rss) {
4794                 pf->lan_nb_qps = 1;
4795         } else {
4796                 pf->flags |= I40E_FLAG_RSS;
4797                 if (hw->mac.type == I40E_MAC_X722)
4798                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4799                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4800         }
4801         qp_count += pf->lan_nb_qps;
4802         vsi_count += 1;
4803
4804         /* VF queue/VSI allocation */
4805         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4806         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4807                 pf->flags |= I40E_FLAG_SRIOV;
4808                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4809                 pf->vf_num = pci_dev->max_vfs;
4810                 PMD_DRV_LOG(DEBUG,
4811                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4812                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4813         } else {
4814                 pf->vf_nb_qps = 0;
4815                 pf->vf_num = 0;
4816         }
4817         qp_count += pf->vf_nb_qps * pf->vf_num;
4818         vsi_count += pf->vf_num;
4819
4820         /* VMDq queue/VSI allocation */
4821         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4822         pf->vmdq_nb_qps = 0;
4823         pf->max_nb_vmdq_vsi = 0;
4824         if (hw->func_caps.vmdq) {
4825                 if (qp_count < hw->func_caps.num_tx_qp &&
4826                         vsi_count < hw->func_caps.num_vsis) {
4827                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4828                                 qp_count) / pf->vmdq_nb_qp_max;
4829
4830                         /* Limit the number of VMDq VSIs to the maximum
4831                          * that ethdev can support
4832                          */
4833                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4834                                 hw->func_caps.num_vsis - vsi_count);
4835                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4836                                 ETH_64_POOLS);
4837                         if (pf->max_nb_vmdq_vsi) {
4838                                 pf->flags |= I40E_FLAG_VMDQ;
4839                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4840                                 PMD_DRV_LOG(DEBUG,
4841                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4842                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4843                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4844                         } else {
4845                                 PMD_DRV_LOG(INFO,
4846                                         "Not enough queues left for VMDq");
4847                         }
4848                 } else {
4849                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4850                 }
4851         }
4852         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4853         vsi_count += pf->max_nb_vmdq_vsi;
4854
4855         if (hw->func_caps.dcb)
4856                 pf->flags |= I40E_FLAG_DCB;
4857
4858         if (qp_count > hw->func_caps.num_tx_qp) {
4859                 PMD_DRV_LOG(ERR,
4860                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4861                         qp_count, hw->func_caps.num_tx_qp);
4862                 return -EINVAL;
4863         }
4864         if (vsi_count > hw->func_caps.num_vsis) {
4865                 PMD_DRV_LOG(ERR,
4866                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4867                         vsi_count, hw->func_caps.num_vsis);
4868                 return -EINVAL;
4869         }
4870
4871         return 0;
4872 }
4873
4874 static int
4875 i40e_pf_get_switch_config(struct i40e_pf *pf)
4876 {
4877         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4878         struct i40e_aqc_get_switch_config_resp *switch_config;
4879         struct i40e_aqc_switch_config_element_resp *element;
4880         uint16_t start_seid = 0, num_reported;
4881         int ret;
4882
4883         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4884                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4885         if (!switch_config) {
4886                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4887                 return -ENOMEM;
4888         }
4889
4890         /* Get the switch configurations */
4891         ret = i40e_aq_get_switch_config(hw, switch_config,
4892                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4893         if (ret != I40E_SUCCESS) {
4894                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4895                 goto fail;
4896         }
4897         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4898         if (num_reported != 1) { /* The number should be 1 */
4899                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4900                 goto fail;
4901         }
4902
4903         /* Parse the switch configuration elements */
4904         element = &(switch_config->element[0]);
4905         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4906                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4907                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4908         } else
4909                 PMD_DRV_LOG(INFO, "Unknown element type");
4910
4911 fail:
4912         rte_free(switch_config);
4913
4914         return ret;
4915 }
4916
4917 static int
4918 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4919                         uint32_t num)
4920 {
4921         struct pool_entry *entry;
4922
4923         if (pool == NULL || num == 0)
4924                 return -EINVAL;
4925
4926         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4927         if (entry == NULL) {
4928                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4929                 return -ENOMEM;
4930         }
4931
4932         /* Initialize the queue heap */
4933         pool->num_free = num;
4934         pool->num_alloc = 0;
4935         pool->base = base;
4936         LIST_INIT(&pool->alloc_list);
4937         LIST_INIT(&pool->free_list);
4938
4939         /* Initialize the element */
4940         entry->base = 0;
4941         entry->len = num;
4942
4943         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4944         return 0;
4945 }
4946
4947 static void
4948 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4949 {
4950         struct pool_entry *entry, *next_entry;
4951
4952         if (pool == NULL)
4953                 return;
4954
4955         for (entry = LIST_FIRST(&pool->alloc_list);
4956                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4957                         entry = next_entry) {
4958                 LIST_REMOVE(entry, next);
4959                 rte_free(entry);
4960         }
4961
4962         for (entry = LIST_FIRST(&pool->free_list);
4963                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4964                         entry = next_entry) {
4965                 LIST_REMOVE(entry, next);
4966                 rte_free(entry);
4967         }
4968
4969         pool->num_free = 0;
4970         pool->num_alloc = 0;
4971         pool->base = 0;
4972         LIST_INIT(&pool->alloc_list);
4973         LIST_INIT(&pool->free_list);
4974 }
4975
4976 static int
4977 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4978                        uint32_t base)
4979 {
4980         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4981         uint32_t pool_offset;
4982         uint16_t len;
4983         int insert;
4984
4985         if (pool == NULL) {
4986                 PMD_DRV_LOG(ERR, "Invalid parameter");
4987                 return -EINVAL;
4988         }
4989
4990         pool_offset = base - pool->base;
4991         /* Lookup in alloc list */
4992         LIST_FOREACH(entry, &pool->alloc_list, next) {
4993                 if (entry->base == pool_offset) {
4994                         valid_entry = entry;
4995                         LIST_REMOVE(entry, next);
4996                         break;
4997                 }
4998         }
4999
5000         /* Not found, return */
5001         if (valid_entry == NULL) {
5002                 PMD_DRV_LOG(ERR, "Failed to find entry");
5003                 return -EINVAL;
5004         }
5005
5006         /**
5007          * Found it; move it to the free list and try to merge.
5008          * To make merging easier, the free list is kept sorted by base.
5009          * Find the adjacent prev and next entries.
5010          */
5011         prev = next = NULL;
5012         LIST_FOREACH(entry, &pool->free_list, next) {
5013                 if (entry->base > valid_entry->base) {
5014                         next = entry;
5015                         break;
5016                 }
5017                 prev = entry;
5018         }
5019
5020         insert = 0;
5021         len = valid_entry->len;
5022         /* Try to merge with the next one */
5023         if (next != NULL) {
5024                 /* Merge with next one */
5025                 if (valid_entry->base + len == next->base) {
5026                         next->base = valid_entry->base;
5027                         next->len += len;
5028                         rte_free(valid_entry);
5029                         valid_entry = next;
5030                         insert = 1;
5031                 }
5032         }
5033
5034         if (prev != NULL) {
5035                 /* Merge with previous one */
5036                 if (prev->base + prev->len == valid_entry->base) {
5037                         prev->len += len;
5038                         /* If it already merged with the next one, remove that node */
5039                         if (insert == 1) {
5040                                 LIST_REMOVE(valid_entry, next);
5041                                 rte_free(valid_entry);
5042                                 valid_entry = NULL;
5043                         } else {
5044                                 rte_free(valid_entry);
5045                                 valid_entry = NULL;
5046                                 insert = 1;
5047                         }
5048                 }
5049         }
5050
5051         /* No entry was merged, so insert it */
5052         if (insert == 0) {
5053                 if (prev != NULL)
5054                         LIST_INSERT_AFTER(prev, valid_entry, next);
5055                 else if (next != NULL)
5056                         LIST_INSERT_BEFORE(next, valid_entry, next);
5057                 else /* It's empty list, insert to head */
5058                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5059         }
5060
5061         pool->num_free += len;
5062         pool->num_alloc -= len;
5063
5064         return 0;
5065 }
5066
5067 static int
5068 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5069                        uint16_t num)
5070 {
5071         struct pool_entry *entry, *valid_entry;
5072
5073         if (pool == NULL || num == 0) {
5074                 PMD_DRV_LOG(ERR, "Invalid parameter");
5075                 return -EINVAL;
5076         }
5077
5078         if (pool->num_free < num) {
5079                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
5080                             num, pool->num_free);
5081                 return -ENOMEM;
5082         }
5083
5084         valid_entry = NULL;
5085         /* Look up the free list and find the best-fit entry */
5086         LIST_FOREACH(entry, &pool->free_list, next) {
5087                 if (entry->len >= num) {
5088                         /* Find best one */
5089                         if (entry->len == num) {
5090                                 valid_entry = entry;
5091                                 break;
5092                         }
5093                         if (valid_entry == NULL || valid_entry->len > entry->len)
5094                                 valid_entry = entry;
5095                 }
5096         }
5097
5098         /* No entry satisfies the request, return */
5099         if (valid_entry == NULL) {
5100                 PMD_DRV_LOG(ERR, "No valid entry found");
5101                 return -ENOMEM;
5102         }
5103         /**
5104          * The entry has exactly the requested number of queues;
5105          * remove it from the free list.
5106          */
5107         if (valid_entry->len == num) {
5108                 LIST_REMOVE(valid_entry, next);
5109         } else {
5110                 /**
5111                  * The entry has more queues than requested;
5112                  * create a new entry for the alloc list and shrink
5113                  * the remaining free-list entry's base and length.
5114                  */
5115                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5116                 if (entry == NULL) {
5117                         PMD_DRV_LOG(ERR,
5118                                 "Failed to allocate memory for resource pool");
5119                         return -ENOMEM;
5120                 }
5121                 entry->base = valid_entry->base;
5122                 entry->len = num;
5123                 valid_entry->base += num;
5124                 valid_entry->len -= num;
5125                 valid_entry = entry;
5126         }
5127
5128         /* Insert it into alloc list, not sorted */
5129         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5130
5131         pool->num_free -= valid_entry->len;
5132         pool->num_alloc += valid_entry->len;
5133
5134         return valid_entry->base + pool->base;
5135 }
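/*
 * Usage sketch of the pool helpers above, e.g. for a queue range whose
 * hardware base is 64 (numbers made up for the example):
 *
 *	struct i40e_res_pool_info pool;
 *	int base;
 *
 *	i40e_res_pool_init(&pool, 64, 128);	manage queues 64..191
 *	base = i40e_res_pool_alloc(&pool, 16);	returns 64 on success
 *	...
 *	i40e_res_pool_free(&pool, base);	merged back into the free list
 *	i40e_res_pool_destroy(&pool);
 */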
5136
5137 /**
5138  * bitmap_is_subset - Check whether src2 is subset of src1
5139  **/
5140 static inline int
5141 bitmap_is_subset(uint8_t src1, uint8_t src2)
5142 {
5143         return !((src1 ^ src2) & src2);
5144 }
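/*
 * Worked example: with src1 = 0x07 and src2 = 0x05, (src1 ^ src2) = 0x02
 * and 0x02 & 0x05 = 0, so src2 is a subset and 1 is returned. With
 * src2 = 0x09, (src1 ^ src2) & src2 = 0x08 (bit 3 is missing from src1),
 * so 0 is returned.
 */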
5145
5146 static enum i40e_status_code
5147 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5148 {
5149         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5150
5151         /* If DCB is not supported, only default TC is supported */
5152         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5153                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
5154                 return I40E_NOT_SUPPORTED;
5155         }
5156
5157         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
5158                 PMD_DRV_LOG(ERR,
5159                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
5160                         enabled_tcmap, hw->func_caps.enabled_tcmap);
5161                 return I40E_NOT_SUPPORTED;
5162         }
5163         return I40E_SUCCESS;
5164 }
5165
5166 int
5167 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5168                                 struct i40e_vsi_vlan_pvid_info *info)
5169 {
5170         struct i40e_hw *hw;
5171         struct i40e_vsi_context ctxt;
5172         uint8_t vlan_flags = 0;
5173         int ret;
5174
5175         if (vsi == NULL || info == NULL) {
5176                 PMD_DRV_LOG(ERR, "invalid parameters");
5177                 return I40E_ERR_PARAM;
5178         }
5179
5180         if (info->on) {
5181                 vsi->info.pvid = info->config.pvid;
5182                 /**
5183                  * If PVID insertion is enabled, only tagged packets
5184                  * are allowed to be sent out.
5185                  */
5186                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5187                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5188         } else {
5189                 vsi->info.pvid = 0;
5190                 if (info->config.reject.tagged == 0)
5191                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5192
5193                 if (info->config.reject.untagged == 0)
5194                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5195         }
5196         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5197                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
5198         vsi->info.port_vlan_flags |= vlan_flags;
5199         vsi->info.valid_sections =
5200                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5201         memset(&ctxt, 0, sizeof(ctxt));
5202         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5203         ctxt.seid = vsi->seid;
5204
5205         hw = I40E_VSI_TO_HW(vsi);
5206         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5207         if (ret != I40E_SUCCESS)
5208                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
5209
5210         return ret;
5211 }
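
/*
 * Usage sketch (illustrative only): enabling PVID insertion with VLAN 100
 * on an already initialized VSI; the vsi pointer is assumed valid.
 *
 *     struct i40e_vsi_vlan_pvid_info info;
 *
 *     memset(&info, 0, sizeof(info));
 *     info.on = 1;
 *     info.config.pvid = 100;
 *     if (i40e_vsi_vlan_pvid_set(vsi, &info) != I40E_SUCCESS)
 *             PMD_DRV_LOG(ERR, "Failed to set PVID");
 */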
5212
5213 static int
5214 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5215 {
5216         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5217         int i, ret;
5218         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5219
5220         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5221         if (ret != I40E_SUCCESS)
5222                 return ret;
5223
5224         if (!vsi->seid) {
5225                 PMD_DRV_LOG(ERR, "seid not valid");
5226                 return -EINVAL;
5227         }
5228
5229         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5230         tc_bw_data.tc_valid_bits = enabled_tcmap;
5231         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5232                 tc_bw_data.tc_bw_credits[i] =
5233                         (enabled_tcmap & (1 << i)) ? 1 : 0;
5234
5235         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5236         if (ret != I40E_SUCCESS) {
5237                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5238                 return ret;
5239         }
5240
5241         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5242                                         sizeof(vsi->info.qs_handle));
5243         return I40E_SUCCESS;
5244 }
5245
5246 static enum i40e_status_code
5247 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5248                                  struct i40e_aqc_vsi_properties_data *info,
5249                                  uint8_t enabled_tcmap)
5250 {
5251         enum i40e_status_code ret;
5252         int i, total_tc = 0;
5253         uint16_t qpnum_per_tc, bsf, qp_idx;
5254
5255         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5256         if (ret != I40E_SUCCESS)
5257                 return ret;
5258
5259         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5260                 if (enabled_tcmap & (1 << i))
5261                         total_tc++;
5262         if (total_tc == 0)
5263                 total_tc = 1;
5264         vsi->enabled_tc = enabled_tcmap;
5265
5266         /* Number of queues per enabled TC */
5267         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5268         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
5269         bsf = rte_bsf32(qpnum_per_tc);
5270
5271         /* Adjust the queue count to the actual number of queues that can be applied */
5272         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5273                 vsi->nb_qps = qpnum_per_tc * total_tc;
5274
5275         /**
5276          * Configure TC and queue mapping parameters. For each enabled TC,
5277          * allocate qpnum_per_tc queues to its traffic; disabled TCs are
5278          * served by the default queue.
5279          */
5280         qp_idx = 0;
5281         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5282                 if (vsi->enabled_tc & (1 << i)) {
5283                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5284                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5285                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5286                         qp_idx += qpnum_per_tc;
5287                 } else
5288                         info->tc_mapping[i] = 0;
5289         }
5290
5291         /* Associate queue number with VSI */
5292         if (vsi->type == I40E_VSI_SRIOV) {
5293                 info->mapping_flags |=
5294                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5295                 for (i = 0; i < vsi->nb_qps; i++)
5296                         info->queue_mapping[i] =
5297                                 rte_cpu_to_le_16(vsi->base_queue + i);
5298         } else {
5299                 info->mapping_flags |=
5300                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5301                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5302         }
5303         info->valid_sections |=
5304                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5305
5306         return I40E_SUCCESS;
5307 }
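
/*
 * Worked example for the mapping above (illustrative): with two TCs
 * enabled and vsi->nb_qps = 8, total_tc = 2, qpnum_per_tc = 4 and
 * bsf = rte_bsf32(4) = 2. TC0 then gets
 *     tc_mapping[0] = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                     (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 * and TC1 gets queue offset 4 with the same bsf, i.e. each field encodes
 * the TC's first queue index plus log2 of the per-TC queue count.
 */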
5308
5309 static int
5310 i40e_veb_release(struct i40e_veb *veb)
5311 {
5312         struct i40e_vsi *vsi;
5313         struct i40e_hw *hw;
5314
5315         if (veb == NULL)
5316                 return -EINVAL;
5317
5318         if (!TAILQ_EMPTY(&veb->head)) {
5319                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5320                 return -EACCES;
5321         }
5322         /* associate_vsi field is NULL for floating VEB */
5323         if (veb->associate_vsi != NULL) {
5324                 vsi = veb->associate_vsi;
5325                 hw = I40E_VSI_TO_HW(vsi);
5326
5327                 vsi->uplink_seid = veb->uplink_seid;
5328                 vsi->veb = NULL;
5329         } else {
5330                 veb->associate_pf->main_vsi->floating_veb = NULL;
5331                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5332         }
5333
5334         i40e_aq_delete_element(hw, veb->seid, NULL);
5335         rte_free(veb);
5336         return I40E_SUCCESS;
5337 }
5338
5339 /* Set up a VEB */
5340 static struct i40e_veb *
5341 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5342 {
5343         struct i40e_veb *veb;
5344         int ret;
5345         struct i40e_hw *hw;
5346
5347         if (pf == NULL) {
5348                 PMD_DRV_LOG(ERR,
5349                             "veb setup failed, associated PF shouldn't be NULL");
5350                 return NULL;
5351         }
5352         hw = I40E_PF_TO_HW(pf);
5353
5354         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5355         if (!veb) {
5356                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5357                 goto fail;
5358         }
5359
5360         veb->associate_vsi = vsi;
5361         veb->associate_pf = pf;
5362         TAILQ_INIT(&veb->head);
5363         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5364
5365         /* create floating veb if vsi is NULL */
5366         if (vsi != NULL) {
5367                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5368                                       I40E_DEFAULT_TCMAP, false,
5369                                       &veb->seid, false, NULL);
5370         } else {
5371                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5372                                       true, &veb->seid, false, NULL);
5373         }
5374
5375         if (ret != I40E_SUCCESS) {
5376                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5377                             hw->aq.asq_last_status);
5378                 goto fail;
5379         }
5380         veb->enabled_tc = I40E_DEFAULT_TCMAP;
5381
5382         /* get statistics index */
5383         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5384                                 &veb->stats_idx, NULL, NULL, NULL);
5385         if (ret != I40E_SUCCESS) {
5386                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5387                             hw->aq.asq_last_status);
5388                 goto fail;
5389         }
5390         /* Get VEB bandwidth, to be implemented */
5391 /* The associated VSI now binds to the VEB; set its uplink to this VEB */
5392         if (vsi)
5393                 vsi->uplink_seid = veb->seid;
5394
5395         return veb;
5396 fail:
5397         rte_free(veb);
5398         return NULL;
5399 }
5400
5401 int
5402 i40e_vsi_release(struct i40e_vsi *vsi)
5403 {
5404         struct i40e_pf *pf;
5405         struct i40e_hw *hw;
5406         struct i40e_vsi_list *vsi_list;
5407         void *temp;
5408         int ret;
5409         struct i40e_mac_filter *f;
5410         uint16_t user_param;
5411
5412         if (!vsi)
5413                 return I40E_SUCCESS;
5414
5415         if (!vsi->adapter)
5416                 return -EFAULT;
5417
5418         user_param = vsi->user_param;
5419
5420         pf = I40E_VSI_TO_PF(vsi);
5421         hw = I40E_VSI_TO_HW(vsi);
5422
5423         /* VSI has children attached; release the children first */
5424         if (vsi->veb) {
5425                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5426                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5427                                 return -1;
5428                 }
5429                 i40e_veb_release(vsi->veb);
5430         }
5431
5432         if (vsi->floating_veb) {
5433                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5434                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5435                                 return -1;
5436                 }
5437         }
5438
5439         /* Remove all macvlan filters of the VSI */
5440         i40e_vsi_remove_all_macvlan_filter(vsi);
5441         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5442                 rte_free(f);
5443
5444         if (vsi->type != I40E_VSI_MAIN &&
5445             ((vsi->type != I40E_VSI_SRIOV) ||
5446             !pf->floating_veb_list[user_param])) {
5447                 /* Remove vsi from parent's sibling list */
5448                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5449                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5450                         return I40E_ERR_PARAM;
5451                 }
5452                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5453                                 &vsi->sib_vsi_list, list);
5454
5455                 /* Remove all switch elements of the VSI */
5456                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5457                 if (ret != I40E_SUCCESS)
5458                         PMD_DRV_LOG(ERR, "Failed to delete element");
5459         }
5460
5461         if ((vsi->type == I40E_VSI_SRIOV) &&
5462             pf->floating_veb_list[user_param]) {
5463                 /* Remove vsi from parent's sibling list */
5464                 if (vsi->parent_vsi == NULL ||
5465                     vsi->parent_vsi->floating_veb == NULL) {
5466                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5467                         return I40E_ERR_PARAM;
5468                 }
5469                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5470                              &vsi->sib_vsi_list, list);
5471
5472                 /* Remove all switch elements of the VSI */
5473                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5474                 if (ret != I40E_SUCCESS)
5475                         PMD_DRV_LOG(ERR, "Failed to delete element");
5476         }
5477
5478         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5479
5480         if (vsi->type != I40E_VSI_SRIOV)
5481                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5482         rte_free(vsi);
5483
5484         return I40E_SUCCESS;
5485 }
5486
5487 static int
5488 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5489 {
5490         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5491         struct i40e_aqc_remove_macvlan_element_data def_filter;
5492         struct i40e_mac_filter_info filter;
5493         int ret;
5494
5495         if (vsi->type != I40E_VSI_MAIN)
5496                 return I40E_ERR_CONFIG;
5497         memset(&def_filter, 0, sizeof(def_filter));
5498         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5499                                         ETH_ADDR_LEN);
5500         def_filter.vlan_tag = 0;
5501         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5502                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5503         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5504         if (ret != I40E_SUCCESS) {
5505                 struct i40e_mac_filter *f;
5506                 struct rte_ether_addr *mac;
5507
5508                 PMD_DRV_LOG(DEBUG,
5509                             "Cannot remove the default macvlan filter");
5510                 /* Need to add the permanent MAC to the MAC list */
5511                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5512                 if (f == NULL) {
5513                         PMD_DRV_LOG(ERR, "failed to allocate memory");
5514                         return I40E_ERR_NO_MEMORY;
5515                 }
5516                 mac = &f->mac_info.mac_addr;
5517                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5518                                 ETH_ADDR_LEN);
5519                 f->mac_info.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5520                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5521                 vsi->mac_num++;
5522
5523                 return ret;
5524         }
5525         rte_memcpy(&filter.mac_addr,
5526                 (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5527         filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5528         return i40e_vsi_add_mac(vsi, &filter);
5529 }
5530
5531 /*
5532  * i40e_vsi_get_bw_config - Query VSI BW Information
5533  * @vsi: the VSI to be queried
5534  *
5535  * Returns 0 on success, negative value on failure
5536  */
5537 static enum i40e_status_code
5538 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5539 {
5540         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5541         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5542         struct i40e_hw *hw = &vsi->adapter->hw;
5543         i40e_status ret;
5544         int i;
5545         uint32_t bw_max;
5546
5547         memset(&bw_config, 0, sizeof(bw_config));
5548         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5549         if (ret != I40E_SUCCESS) {
5550                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5551                             hw->aq.asq_last_status);
5552                 return ret;
5553         }
5554
5555         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5556         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5557                                         &ets_sla_config, NULL);
5558         if (ret != I40E_SUCCESS) {
5559                 PMD_DRV_LOG(ERR,
5560                         "VSI failed to get TC bandwidth configuration %u",
5561                         hw->aq.asq_last_status);
5562                 return ret;
5563         }
5564
5565         /* store and print out BW info */
5566         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5567         vsi->bw_info.bw_max = bw_config.max_bw;
5568         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5569         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5570         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5571                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5572                      I40E_16_BIT_WIDTH);
5573         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5574                 vsi->bw_info.bw_ets_share_credits[i] =
5575                                 ets_sla_config.share_credits[i];
5576                 vsi->bw_info.bw_ets_credits[i] =
5577                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5578                 /* 4 bits per TC, 4th bit is reserved */
5579                 vsi->bw_info.bw_ets_max[i] =
5580                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5581                                   RTE_LEN2MASK(3, uint8_t));
5582                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5583                             vsi->bw_info.bw_ets_share_credits[i]);
5584                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5585                             vsi->bw_info.bw_ets_credits[i]);
5586                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5587                             vsi->bw_info.bw_ets_max[i]);
5588         }
5589
5590         return I40E_SUCCESS;
5591 }
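
/*
 * Worked example for the bw_max unpacking above (illustrative):
 * tc_bw_max packs eight 4-bit fields, one per TC, into two 16-bit words.
 * If bw_max = 0x00003210, then TC0 = (bw_max >> 0) & 0x7 = 0,
 * TC1 = (bw_max >> 4) & 0x7 = 1 and TC2 = (bw_max >> 8) & 0x7 = 2;
 * the 3-bit mask drops the reserved 4th bit of each field.
 */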
5592
5593 /* i40e_enable_pf_lb
5594  * @pf: pointer to the pf structure
5595  *
5596  * Allow loopback on the PF
5597  */
5598 static inline void
5599 i40e_enable_pf_lb(struct i40e_pf *pf)
5600 {
5601         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5602         struct i40e_vsi_context ctxt;
5603         int ret;
5604
5605         /* Use the FW API if FW >= v5.0 */
5606         if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5607                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5608                 return;
5609         }
5610
5611         memset(&ctxt, 0, sizeof(ctxt));
5612         ctxt.seid = pf->main_vsi_seid;
5613         ctxt.pf_num = hw->pf_id;
5614         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5615         if (ret) {
5616                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5617                             ret, hw->aq.asq_last_status);
5618                 return;
5619         }
5620         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5621         ctxt.info.valid_sections =
5622                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5623         ctxt.info.switch_id |=
5624                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5625
5626         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5627         if (ret)
5628                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5629                             hw->aq.asq_last_status);
5630 }
5631
5632 /* Setup a VSI */
5633 struct i40e_vsi *
5634 i40e_vsi_setup(struct i40e_pf *pf,
5635                enum i40e_vsi_type type,
5636                struct i40e_vsi *uplink_vsi,
5637                uint16_t user_param)
5638 {
5639         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5640         struct i40e_vsi *vsi;
5641         struct i40e_mac_filter_info filter;
5642         int ret;
5643         struct i40e_vsi_context ctxt;
5644         struct rte_ether_addr broadcast =
5645                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5646
5647         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5648             uplink_vsi == NULL) {
5649                 PMD_DRV_LOG(ERR,
5650                         "VSI setup failed, uplink VSI shouldn't be NULL");
5651                 return NULL;
5652         }
5653
5654         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5655                 PMD_DRV_LOG(ERR,
5656                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5657                 return NULL;
5658         }
5659
5660         /* Two situations:
5661          * 1. type is not MAIN and uplink VSI is not NULL:
5662          *    if the uplink VSI hasn't set up a VEB, create one first (veb field)
5663          * 2. type is SRIOV and the uplink is NULL:
5664          *    if the floating VEB is NULL, create one (floating_veb field)
5665          */
5666
5667         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5668             uplink_vsi->veb == NULL) {
5669                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5670
5671                 if (uplink_vsi->veb == NULL) {
5672                         PMD_DRV_LOG(ERR, "VEB setup failed");
5673                         return NULL;
5674                 }
5675                 /* Set ALLOW_LOOPBACK on the PF when the VEB is created */
5676                 i40e_enable_pf_lb(pf);
5677         }
5678
5679         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5680             pf->main_vsi->floating_veb == NULL) {
5681                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5682
5683                 if (pf->main_vsi->floating_veb == NULL) {
5684                         PMD_DRV_LOG(ERR, "VEB setup failed");
5685                         return NULL;
5686                 }
5687         }
5688
5689         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5690         if (!vsi) {
5691                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5692                 return NULL;
5693         }
5694         TAILQ_INIT(&vsi->mac_list);
5695         vsi->type = type;
5696         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5697         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5698         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5699         vsi->user_param = user_param;
5700         vsi->vlan_anti_spoof_on = 0;
5701         vsi->vlan_filter_on = 0;
5702         /* Allocate queues */
5703         switch (vsi->type) {
5704         case I40E_VSI_MAIN:
5705                 vsi->nb_qps = pf->lan_nb_qps;
5706                 break;
5707         case I40E_VSI_SRIOV:
5708                 vsi->nb_qps = pf->vf_nb_qps;
5709                 break;
5710         case I40E_VSI_VMDQ2:
5711                 vsi->nb_qps = pf->vmdq_nb_qps;
5712                 break;
5713         case I40E_VSI_FDIR:
5714                 vsi->nb_qps = pf->fdir_nb_qps;
5715                 break;
5716         default:
5717                 goto fail_mem;
5718         }
5719         /*
5720          * The filter status descriptor is reported on RX queue 0,
5721          * while the TX queue for FDIR filter programming has no
5722          * such constraint and can be any queue.
5723          * To simplify, make the FDIR VSI use queue pair 0.
5724          * To guarantee it gets queue pair 0, its queue allocation
5725          * must be done before this function is called.
5726          */
5727         if (type != I40E_VSI_FDIR) {
5728                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5729                 if (ret < 0) {
5730                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5731                                     vsi->seid, ret);
5732                         goto fail_mem;
5733                 }
5734                 vsi->base_queue = ret;
5735         } else
5736                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5737
5738         /* VF has MSI-X interrupts in the VF range, don't allocate here */
5739         if (type == I40E_VSI_MAIN) {
5740                 if (pf->support_multi_driver) {
5741                         /* If multi-driver is supported, INT0 must be used
5742                          * instead of allocating from the MSI-X pool, which is
5743                          * initialized from INT1; it's OK to just set msix_intr
5744                          * to 0 and nb_msix to 1 without i40e_res_pool_alloc.
5745                          */
5746                         vsi->msix_intr = 0;
5747                         vsi->nb_msix = 1;
5748                 } else {
5749                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5750                                                   RTE_MIN(vsi->nb_qps,
5751                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5752                         if (ret < 0) {
5753                                 PMD_DRV_LOG(ERR,
5754                                             "VSI MAIN %d get heap failed %d",
5755                                             vsi->seid, ret);
5756                                 goto fail_queue_alloc;
5757                         }
5758                         vsi->msix_intr = ret;
5759                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5760                                                RTE_MAX_RXTX_INTR_VEC_ID);
5761                 }
5762         } else if (type != I40E_VSI_SRIOV) {
5763                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5764                 if (ret < 0) {
5765                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5766                         if (type != I40E_VSI_FDIR)
5767                                 goto fail_queue_alloc;
5768                         vsi->msix_intr = 0;
5769                         vsi->nb_msix = 0;
5770                 } else {
5771                         vsi->msix_intr = ret;
5772                         vsi->nb_msix = 1;
5773                 }
5774         } else {
5775                 vsi->msix_intr = 0;
5776                 vsi->nb_msix = 0;
5777         }
5778
5779         /* Add VSI */
5780         if (type == I40E_VSI_MAIN) {
5781                 /* For the main VSI, no need to add since it's the default one */
5782                 vsi->uplink_seid = pf->mac_seid;
5783                 vsi->seid = pf->main_vsi_seid;
5784                 /* Bind queues with specific MSIX interrupt */
5785                 /**
5786                  * At least 2 interrupts are needed: one for misc causes,
5787                  * enabled from the OS side, and another for queue binding,
5788                  * from the device side only.
5789                  */
5790
5791                 /* Get default VSI parameters from hardware */
5792                 memset(&ctxt, 0, sizeof(ctxt));
5793                 ctxt.seid = vsi->seid;
5794                 ctxt.pf_num = hw->pf_id;
5795                 ctxt.uplink_seid = vsi->uplink_seid;
5796                 ctxt.vf_num = 0;
5797                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5798                 if (ret != I40E_SUCCESS) {
5799                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5800                         goto fail_msix_alloc;
5801                 }
5802                 rte_memcpy(&vsi->info, &ctxt.info,
5803                         sizeof(struct i40e_aqc_vsi_properties_data));
5804                 vsi->vsi_id = ctxt.vsi_number;
5805                 vsi->info.valid_sections = 0;
5806
5807                 /* Configure tc, enabled TC0 only */
5808                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5809                         I40E_SUCCESS) {
5810                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5811                         goto fail_msix_alloc;
5812                 }
5813
5814                 /* TC, queue mapping */
5815                 memset(&ctxt, 0, sizeof(ctxt));
5816                 vsi->info.valid_sections |=
5817                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5818                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5819                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5820                 rte_memcpy(&ctxt.info, &vsi->info,
5821                         sizeof(struct i40e_aqc_vsi_properties_data));
5822                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5823                                                 I40E_DEFAULT_TCMAP);
5824                 if (ret != I40E_SUCCESS) {
5825                         PMD_DRV_LOG(ERR,
5826                                 "Failed to configure TC queue mapping");
5827                         goto fail_msix_alloc;
5828                 }
5829                 ctxt.seid = vsi->seid;
5830                 ctxt.pf_num = hw->pf_id;
5831                 ctxt.uplink_seid = vsi->uplink_seid;
5832                 ctxt.vf_num = 0;
5833
5834                 /* Update VSI parameters */
5835                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5836                 if (ret != I40E_SUCCESS) {
5837                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5838                         goto fail_msix_alloc;
5839                 }
5840
5841                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5842                                                 sizeof(vsi->info.tc_mapping));
5843                 rte_memcpy(&vsi->info.queue_mapping,
5844                                 &ctxt.info.queue_mapping,
5845                         sizeof(vsi->info.queue_mapping));
5846                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5847                 vsi->info.valid_sections = 0;
5848
5849                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5850                                 ETH_ADDR_LEN);
5851
5852                 /**
5853                  * Updating the default filter settings is necessary to
5854                  * prevent reception of tagged packets.
5855                  * Some old firmware configurations load a default macvlan
5856                  * filter which accepts both tagged and untagged packets.
5857                  * The update replaces it with a normal filter if needed.
5858                  * For NVM 4.2.2 or later, the update is not needed anymore;
5859                  * firmware with correct configurations loads the default
5860                  * macvlan filter, which is expected and cannot be removed.
5861                  */
5862                 i40e_update_default_filter_setting(vsi);
5863                 i40e_config_qinq(hw, vsi);
5864         } else if (type == I40E_VSI_SRIOV) {
5865                 memset(&ctxt, 0, sizeof(ctxt));
5866                 /**
5867                  * For other VSIs, the uplink_seid equals the uplink VSI's
5868                  * uplink_seid since they share the same VEB.
5869                  */
5870                 if (uplink_vsi == NULL)
5871                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5872                 else
5873                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5874                 ctxt.pf_num = hw->pf_id;
5875                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5876                 ctxt.uplink_seid = vsi->uplink_seid;
5877                 ctxt.connection_type = 0x1;
5878                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5879
5880                 /* Use the VEB configuration if FW >= v5.0 */
5881                 if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
5882                         /* Configure switch ID */
5883                         ctxt.info.valid_sections |=
5884                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5885                         ctxt.info.switch_id =
5886                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5887                 }
5888
5889                 /* Configure port/vlan */
5890                 ctxt.info.valid_sections |=
5891                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5892                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5893                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5894                                                 hw->func_caps.enabled_tcmap);
5895                 if (ret != I40E_SUCCESS) {
5896                         PMD_DRV_LOG(ERR,
5897                                 "Failed to configure TC queue mapping");
5898                         goto fail_msix_alloc;
5899                 }
5900
5901                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5902                 ctxt.info.valid_sections |=
5903                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5904                 /**
5905                  * Since the VSI is not created yet, only configure the
5906                  * parameters; the VSI will be added below.
5907                  */
5908
5909                 i40e_config_qinq(hw, vsi);
5910         } else if (type == I40E_VSI_VMDQ2) {
5911                 memset(&ctxt, 0, sizeof(ctxt));
5912                 /*
5913                  * For other VSIs, the uplink_seid equals the uplink VSI's
5914                  * uplink_seid since they share the same VEB.
5915                  */
5916                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5917                 ctxt.pf_num = hw->pf_id;
5918                 ctxt.vf_num = 0;
5919                 ctxt.uplink_seid = vsi->uplink_seid;
5920                 ctxt.connection_type = 0x1;
5921                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5922
5923                 ctxt.info.valid_sections |=
5924                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5925                 /* user_param carries the flag to enable loopback */
5926                 if (user_param) {
5927                         ctxt.info.switch_id =
5928                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5929                         ctxt.info.switch_id |=
5930                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5931                 }
5932
5933                 /* Configure port/vlan */
5934                 ctxt.info.valid_sections |=
5935                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5936                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5937                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5938                                                 I40E_DEFAULT_TCMAP);
5939                 if (ret != I40E_SUCCESS) {
5940                         PMD_DRV_LOG(ERR,
5941                                 "Failed to configure TC queue mapping");
5942                         goto fail_msix_alloc;
5943                 }
5944                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5945                 ctxt.info.valid_sections |=
5946                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5947         } else if (type == I40E_VSI_FDIR) {
5948                 memset(&ctxt, 0, sizeof(ctxt));
5949                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5950                 ctxt.pf_num = hw->pf_id;
5951                 ctxt.vf_num = 0;
5952                 ctxt.uplink_seid = vsi->uplink_seid;
5953                 ctxt.connection_type = 0x1;     /* regular data port */
5954                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5955                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5956                                                 I40E_DEFAULT_TCMAP);
5957                 if (ret != I40E_SUCCESS) {
5958                         PMD_DRV_LOG(ERR,
5959                                 "Failed to configure TC queue mapping.");
5960                         goto fail_msix_alloc;
5961                 }
5962                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5963                 ctxt.info.valid_sections |=
5964                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5965         } else {
5966                 PMD_DRV_LOG(ERR, "VSI: Other VSI types are not supported yet");
5967                 goto fail_msix_alloc;
5968         }
5969
5970         if (vsi->type != I40E_VSI_MAIN) {
5971                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5972                 if (ret != I40E_SUCCESS) {
5973                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5974                                     hw->aq.asq_last_status);
5975                         goto fail_msix_alloc;
5976                 }
5977                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5978                 vsi->info.valid_sections = 0;
5979                 vsi->seid = ctxt.seid;
5980                 vsi->vsi_id = ctxt.vsi_number;
5981                 vsi->sib_vsi_list.vsi = vsi;
5982                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5983                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5984                                           &vsi->sib_vsi_list, list);
5985                 } else {
5986                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5987                                           &vsi->sib_vsi_list, list);
5988                 }
5989         }
5990
5991         /* MAC/VLAN configuration */
5992         rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
5993         filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5994
5995         ret = i40e_vsi_add_mac(vsi, &filter);
5996         if (ret != I40E_SUCCESS) {
5997                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5998                 goto fail_msix_alloc;
5999         }
6000
6001         /* Get VSI BW information */
6002         i40e_vsi_get_bw_config(vsi);
6003         return vsi;
6004 fail_msix_alloc:
6005         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
6006 fail_queue_alloc:
6007         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
6008 fail_mem:
6009         rte_free(vsi);
6010         return NULL;
6011 }
6012
6013 /* Configure vlan filter on or off */
6014 int
6015 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
6016 {
6017         int i, num;
6018         struct i40e_mac_filter *f;
6019         void *temp;
6020         struct i40e_mac_filter_info *mac_filter;
6021         enum i40e_mac_filter_type desired_filter;
6022         int ret = I40E_SUCCESS;
6023
6024         if (on) {
6025                 /* Filter to match MAC and VLAN */
6026                 desired_filter = I40E_MACVLAN_PERFECT_MATCH;
6027         } else {
6028                 /* Filter to match only MAC */
6029                 desired_filter = I40E_MAC_PERFECT_MATCH;
6030         }
6031
6032         num = vsi->mac_num;
6033
6034         mac_filter = rte_zmalloc("mac_filter_info_data",
6035                                  num * sizeof(*mac_filter), 0);
6036         if (mac_filter == NULL) {
6037                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6038                 return I40E_ERR_NO_MEMORY;
6039         }
6040
6041         i = 0;
6042
6043         /* Remove all existing MAC filters */
6044         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
6045                 mac_filter[i] = f->mac_info;
6046                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
6047                 if (ret) {
6048                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6049                                     on ? "enable" : "disable");
6050                         goto DONE;
6051                 }
6052                 i++;
6053         }
6054
6055         /* Re-add the filters with the desired filter type */
6056         for (i = 0; i < num; i++) {
6057                 mac_filter[i].filter_type = desired_filter;
6058                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
6059                 if (ret) {
6060                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6061                                     on ? "enable" : "disable");
6062                         goto DONE;
6063                 }
6064         }
6065
6066 DONE:
6067         rte_free(mac_filter);
6068         return ret;
6069 }
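
/*
 * Usage sketch (illustrative only): switching the VSI's MAC filters to
 * MAC+VLAN matching when VLAN filtering is turned on; vsi is assumed to
 * be a valid, configured VSI.
 *
 *     if (i40e_vsi_config_vlan_filter(vsi, TRUE) != I40E_SUCCESS)
 *             PMD_DRV_LOG(ERR, "Failed to enable vlan filtering");
 */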
6070
6071 /* Configure vlan stripping on or off */
6072 int
6073 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6074 {
6075         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6076         struct i40e_vsi_context ctxt;
6077         uint8_t vlan_flags;
6078         int ret = I40E_SUCCESS;
6079
6080         /* Check if it is already on or off */
6081         if (vsi->info.valid_sections &
6082                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6083                 if (on) {
6084                         if ((vsi->info.port_vlan_flags &
6085                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6086                                 return 0; /* already on */
6087                 } else {
6088                         if ((vsi->info.port_vlan_flags &
6089                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6090                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
6091                                 return 0; /* already off */
6092                 }
6093         }
6094
6095         if (on)
6096                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6097         else
6098                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6099         vsi->info.valid_sections =
6100                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6101         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6102         vsi->info.port_vlan_flags |= vlan_flags;
6103         ctxt.seid = vsi->seid;
6104         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6105         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6106         if (ret)
6107                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6108                             on ? "enable" : "disable");
6109
6110         return ret;
6111 }
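
/*
 * Usage sketch (illustrative only): enabling VLAN stripping on a VSI,
 * e.g. from the ethdev VLAN offload path; vsi is assumed valid. Passing
 * FALSE instead disables stripping via EMOD_NOTHING.
 *
 *     if (i40e_vsi_config_vlan_stripping(vsi, TRUE) != I40E_SUCCESS)
 *             PMD_DRV_LOG(ERR, "Failed to enable vlan stripping");
 */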
6112
6113 static int
6114 i40e_dev_init_vlan(struct rte_eth_dev *dev)
6115 {
6116         struct rte_eth_dev_data *data = dev->data;
6117         int ret;
6118         int mask = 0;
6119
6120         /* Apply vlan offload setting */
6121         mask = ETH_VLAN_STRIP_MASK |
6122                ETH_QINQ_STRIP_MASK |
6123                ETH_VLAN_FILTER_MASK |
6124                ETH_VLAN_EXTEND_MASK;
6125         ret = i40e_vlan_offload_set(dev, mask);
6126         if (ret) {
6127                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
6128                 return ret;
6129         }
6130
6131         /* Apply pvid setting */
6132         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6133                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
6134         if (ret)
6135                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
6136
6137         return ret;
6138 }
6139
6140 static int
6141 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6142 {
6143         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6144
6145         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
6146 }
6147
6148 static int
6149 i40e_update_flow_control(struct i40e_hw *hw)
6150 {
6151 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
6152         struct i40e_link_status link_status;
6153         uint32_t rxfc = 0, txfc = 0, reg;
6154         uint8_t an_info;
6155         int ret;
6156
6157         memset(&link_status, 0, sizeof(link_status));
6158         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
6159         if (ret != I40E_SUCCESS) {
6160                 PMD_DRV_LOG(ERR, "Failed to get link status information");
6161                 goto write_reg; /* Disable flow control */
6162         }
6163
6164         an_info = hw->phy.link_info.an_info;
6165         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
6166                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
6167                 ret = I40E_ERR_NOT_READY;
6168                 goto write_reg; /* Disable flow control */
6169         }
6170         /**
6171          * If link auto-negotiation is enabled, flow control needs to
6172          * be configured according to it.
6173          */
6174         switch (an_info & I40E_LINK_PAUSE_RXTX) {
6175         case I40E_LINK_PAUSE_RXTX:
6176                 rxfc = 1;
6177                 txfc = 1;
6178                 hw->fc.current_mode = I40E_FC_FULL;
6179                 break;
6180         case I40E_AQ_LINK_PAUSE_RX:
6181                 rxfc = 1;
6182                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
6183                 break;
6184         case I40E_AQ_LINK_PAUSE_TX:
6185                 txfc = 1;
6186                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
6187                 break;
6188         default:
6189                 hw->fc.current_mode = I40E_FC_NONE;
6190                 break;
6191         }
6192
6193 write_reg:
6194         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6195                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6196         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6197         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6198         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6199         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6200
6201         return ret;
6202 }
6203
6204 /* PF setup */
6205 static int
6206 i40e_pf_setup(struct i40e_pf *pf)
6207 {
6208         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6209         struct i40e_filter_control_settings settings;
6210         struct i40e_vsi *vsi;
6211         int ret;
6212
6213         /* Clear all stats counters */
6214         pf->offset_loaded = FALSE;
6215         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6216         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
6217         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
6218         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6219
6220         ret = i40e_pf_get_switch_config(pf);
6221         if (ret != I40E_SUCCESS) {
6222                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6223                 return ret;
6224         }
6225
6226         ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6227         if (ret)
6228                 PMD_INIT_LOG(WARNING,
6229                         "failed to allocate switch domain, err %d", ret);
6230
6231         if (pf->flags & I40E_FLAG_FDIR) {
6232                 /* Make queue allocation happen first, so FDIR uses queue pair 0 */
6233                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6234                 if (ret != I40E_FDIR_QUEUE_ID) {
6235                         PMD_DRV_LOG(ERR,
6236                                 "queue allocation failed for FDIR: ret = %d",
6237                                 ret);
6238                         pf->flags &= ~I40E_FLAG_FDIR;
6239                 }
6240         }
6241         /* Main VSI setup */
6242         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6243         if (!vsi) {
6244                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6245                 return I40E_ERR_NOT_READY;
6246         }
6247         pf->main_vsi = vsi;
6248
6249         /* Configure filter control */
6250         memset(&settings, 0, sizeof(settings));
6251         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
6252                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6253         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
6254                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6255         else {
6256                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6257                         hw->func_caps.rss_table_size);
6258                 return I40E_ERR_PARAM;
6259         }
6260         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
6261                 hw->func_caps.rss_table_size);
6262         pf->hash_lut_size = hw->func_caps.rss_table_size;
6263
6264         /* Enable ethtype and macvlan filters */
6265         settings.enable_ethtype = TRUE;
6266         settings.enable_macvlan = TRUE;
6267         ret = i40e_set_filter_control(hw, &settings);
6268         if (ret)
6269                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6270                                                                 ret);
6271
6272         /* Update flow control according to the auto negotiation */
6273         i40e_update_flow_control(hw);
6274
6275         return I40E_SUCCESS;
6276 }
6277
6278 int
6279 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6280 {
6281         uint32_t reg;
6282         uint16_t j;
6283
6284         /**
6285          * Set or clear TX Queue Disable flags,
6286          * as required by hardware.
6287          */
6288         i40e_pre_tx_queue_cfg(hw, q_idx, on);
6289         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6290
6291         /* Wait until the request is finished */
6292         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6293                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6294                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6295                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6296                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6297                                                         & 0x1))) {
6298                         break;
6299                 }
6300         }
6301         if (on) {
6302                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6303                         return I40E_SUCCESS; /* already on, skip next steps */
6304
6305                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6306                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6307         } else {
6308                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6309                         return I40E_SUCCESS; /* already off, skip next steps */
6310                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6311         }
6312         /* Write the register */
6313         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6314         /* Check the result */
6315         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6316                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6317                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6318                 if (on) {
6319                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6320                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
6321                                 break;
6322                 } else {
6323                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6324                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6325                                 break;
6326                 }
6327         }
6328         /* Check for timeout */
6329         if (j >= I40E_CHK_Q_ENA_COUNT) {
6330                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6331                             (on ? "enable" : "disable"), q_idx);
6332                 return I40E_ERR_TIMEOUT;
6333         }
6334
6335         return I40E_SUCCESS;
6336 }
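
/*
 * Note on the enable/disable handshake above (descriptive): QENA_REQ is
 * the request bit written by software and QENA_STAT is the status bit
 * reported by hardware. A queue switch is complete only once both bits
 * agree, which the polling loops wait for, bounded by
 * I40E_CHK_Q_ENA_COUNT * I40E_CHK_Q_ENA_INTERVAL_US microseconds.
 * i40e_switch_rx_queue() below follows the same pattern for RX queues.
 */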
6337
6338 int
6339 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6340 {
6341         uint32_t reg;
6342         uint16_t j;
6343
6344         /* Wait until the request is finished */
6345         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6346                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6347                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6348                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6349                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
6350                         break;
6351         }
6352
6353         if (on) {
6354                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6355                         return I40E_SUCCESS; /* Already on, skip next steps */
6356                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6357         } else {
6358                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6359                         return I40E_SUCCESS; /* Already off, skip next steps */
6360                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6361         }
6362
6363         /* Write the register */
6364         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6365         /* Check the result */
6366         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6367                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6368                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6369                 if (on) {
6370                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6371                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
6372                                 break;
6373                 } else {
6374                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6375                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6376                                 break;
6377                 }
6378         }
6379
6380         /* Check for timeout */
6381         if (j >= I40E_CHK_Q_ENA_COUNT) {
6382                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6383                             (on ? "enable" : "disable"), q_idx);
6384                 return I40E_ERR_TIMEOUT;
6385         }
6386
6387         return I40E_SUCCESS;
6388 }
6389
6390 /* Initialize VSI for TX */
6391 static int
6392 i40e_dev_tx_init(struct i40e_pf *pf)
6393 {
6394         struct rte_eth_dev_data *data = pf->dev_data;
6395         uint16_t i;
6396         uint32_t ret = I40E_SUCCESS;
6397         struct i40e_tx_queue *txq;
6398
6399         for (i = 0; i < data->nb_tx_queues; i++) {
6400                 txq = data->tx_queues[i];
6401                 if (!txq || !txq->q_set)
6402                         continue;
6403                 ret = i40e_tx_queue_init(txq);
6404                 if (ret != I40E_SUCCESS)
6405                         break;
6406         }
6407         if (ret == I40E_SUCCESS)
6408                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6409                                      ->eth_dev);
6410
6411         return ret;
6412 }
6413
6414 /* Initialize VSI for RX */
6415 static int
6416 i40e_dev_rx_init(struct i40e_pf *pf)
6417 {
6418         struct rte_eth_dev_data *data = pf->dev_data;
6419         int ret = I40E_SUCCESS;
6420         uint16_t i;
6421         struct i40e_rx_queue *rxq;
6422
6423         i40e_pf_config_rss(pf);
6424         for (i = 0; i < data->nb_rx_queues; i++) {
6425                 rxq = data->rx_queues[i];
6426                 if (!rxq || !rxq->q_set)
6427                         continue;
6428
6429                 ret = i40e_rx_queue_init(rxq);
6430                 if (ret != I40E_SUCCESS) {
6431                         PMD_DRV_LOG(ERR,
6432                                 "Failed to do RX queue initialization");
6433                         break;
6434                 }
6435         }
6436         if (ret == I40E_SUCCESS)
6437                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6438                                      ->eth_dev);
6439
6440         return ret;
6441 }
6442
6443 static int
6444 i40e_dev_rxtx_init(struct i40e_pf *pf)
6445 {
6446         int err;
6447
6448         err = i40e_dev_tx_init(pf);
6449         if (err) {
6450                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6451                 return err;
6452         }
6453         err = i40e_dev_rx_init(pf);
6454         if (err) {
6455                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6456                 return err;
6457         }
6458
6459         return err;
6460 }
6461
6462 static int
6463 i40e_vmdq_setup(struct rte_eth_dev *dev)
6464 {
6465         struct rte_eth_conf *conf = &dev->data->dev_conf;
6466         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6467         int i, err, conf_vsis, j, loop;
6468         struct i40e_vsi *vsi;
6469         struct i40e_vmdq_info *vmdq_info;
6470         struct rte_eth_vmdq_rx_conf *vmdq_conf;
6471         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6472
6473         /*
6474          * Disable interrupts to avoid messages from VFs. This also avoids
6475          * race conditions during VSI creation/destruction.
6476          */
6477         i40e_pf_disable_irq0(hw);
6478
6479         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6480                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6481                 return -ENOTSUP;
6482         }
6483
6484         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6485         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6486                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support: %u",
6487                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6488                         pf->max_nb_vmdq_vsi);
6489                 return -ENOTSUP;
6490         }
6491
6492         if (pf->vmdq != NULL) {
6493                 PMD_INIT_LOG(INFO, "VMDQ already configured");
6494                 return 0;
6495         }
6496
6497         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6498                                 sizeof(*vmdq_info) * conf_vsis, 0);
6499
6500         if (pf->vmdq == NULL) {
6501                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6502                 return -ENOMEM;
6503         }
6504
6505         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6506
6507         /* Create VMDQ VSI */
6508         for (i = 0; i < conf_vsis; i++) {
6509                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6510                                 vmdq_conf->enable_loop_back);
6511                 if (vsi == NULL) {
6512                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6513                         err = -1;
6514                         goto err_vsi_setup;
6515                 }
6516                 vmdq_info = &pf->vmdq[i];
6517                 vmdq_info->pf = pf;
6518                 vmdq_info->vsi = vsi;
6519         }
6520         pf->nb_cfg_vmdq_vsi = conf_vsis;
6521
6522         /* Configure Vlan */
6523         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6524         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6525                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6526                         if (vmdq_conf->pool_map[i].pools & (1ULL << j)) {
6527                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6528                                         vmdq_conf->pool_map[i].vlan_id, j);
6529
6530                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6531                                                 vmdq_conf->pool_map[i].vlan_id);
6532                                 if (err) {
6533                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6534                                         err = -1;
6535                                         goto err_vsi_setup;
6536                                 }
6537                         }
6538                 }
6539         }
6540
6541         i40e_pf_enable_irq0(hw);
6542
6543         return 0;
6544
6545 err_vsi_setup:
6546         for (i = 0; i < conf_vsis; i++)
6547                 if (pf->vmdq[i].vsi == NULL)
6548                         break;
6549                 else
6550                         i40e_vsi_release(pf->vmdq[i].vsi);
6551
6552         rte_free(pf->vmdq);
6553         pf->vmdq = NULL;
6554         i40e_pf_enable_irq0(hw);
6555         return err;
6556 }
6557
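/*
 * The statistics helpers below read free-running hardware counters and
 * report the delta against a snapshot taken at the first read. Because
 * the counters wrap, the delta is computed modulo the counter width.
 * Worked example for the 32-bit case: offset == 0xFFFFFFF0 and a new
 * reading of 0x10 yield (0x10 + 2^32) - 0xFFFFFFF0 == 0x20.
 */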
6558 static void
6559 i40e_stat_update_32(struct i40e_hw *hw,
6560                    uint32_t reg,
6561                    bool offset_loaded,
6562                    uint64_t *offset,
6563                    uint64_t *stat)
6564 {
6565         uint64_t new_data;
6566
6567         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6568         if (!offset_loaded)
6569                 *offset = new_data;
6570
6571         if (new_data >= *offset)
6572                 *stat = (uint64_t)(new_data - *offset);
6573         else
6574                 *stat = (uint64_t)((new_data +
6575                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6576 }
6577
6578 static void
6579 i40e_stat_update_48(struct i40e_hw *hw,
6580                    uint32_t hireg,
6581                    uint32_t loreg,
6582                    bool offset_loaded,
6583                    uint64_t *offset,
6584                    uint64_t *stat)
6585 {
6586         uint64_t new_data;
6587
6588         if (hw->device_id == I40E_DEV_ID_QEMU) {
6589                 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6590                 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6591                                 I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6592         } else {
6593                 new_data = I40E_READ_REG64(hw, loreg);
6594         }
6595
6596         if (!offset_loaded)
6597                 *offset = new_data;
6598
6599         if (new_data >= *offset)
6600                 *stat = new_data - *offset;
6601         else
6602                 *stat = (uint64_t)((new_data +
6603                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6604
6605         *stat &= I40E_48_BIT_MASK;
6606 }
6607
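/*
 * IRQ0 is the "other causes" vector (AdminQ, VFLR, MDD, ...). Enabling
 * writes INTENA plus CLEARPBA so a pending interrupt is acknowledged and
 * re-armed in one register write; disabling keeps only the ITR index
 * bits set.
 */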
6608 /* Disable IRQ0 */
6609 void
6610 i40e_pf_disable_irq0(struct i40e_hw *hw)
6611 {
6612         /* Disable all interrupt types */
6613         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6614                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6615         I40E_WRITE_FLUSH(hw);
6616 }
6617
6618 /* Enable IRQ0 */
6619 void
6620 i40e_pf_enable_irq0(struct i40e_hw *hw)
6621 {
6622         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6623                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6624                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6625                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6626         I40E_WRITE_FLUSH(hw);
6627 }
6628
6629 static void
6630 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6631 {
6632         /* read pending request and disable first */
6633         i40e_pf_disable_irq0(hw);
6634         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6635         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6636                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6637
6638         if (no_queue)
6639                 /* Do not link any queue with irq0 */
6640                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6641                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6642 }
6643
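/*
 * VFLR handling: I40E_GLGEN_VFLRSTAT is an array of 32-bit registers
 * with one bit per absolute VF id, so VF n maps to register n / 32,
 * bit n % 32 (I40E_UINT32_BIT_SIZE is assumed to be 32 here). E.g.
 * absolute VF id 37 is bit 5 of GLGEN_VFLRSTAT(1). The handler clears
 * a bit by writing it back before notifying the VF.
 */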
6644 static void
6645 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6646 {
6647         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6648         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6649         int i;
6650         uint16_t abs_vf_id;
6651         uint32_t index, offset, val;
6652
6653         if (!pf->vfs)
6654                 return;
6655         /**
6656          * Try to find which VF triggered a reset. Use the absolute VF id to
6657          * access it, since the register is a global one.
6658          */
6659         for (i = 0; i < pf->vf_num; i++) {
6660                 abs_vf_id = hw->func_caps.vf_base_id + i;
6661                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6662                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6663                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6664                 /* VFR event occurred */
6665                 if (val & (0x1 << offset)) {
6666                         int ret;
6667
6668                         /* Clear the event first */
6669                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6670                                                         (0x1 << offset));
6671                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6672                         /**
6673                          * Only notify a VF reset event occurred,
6674                          * don't trigger another SW reset
6675                          */
6676                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6677                         if (ret != I40E_SUCCESS)
6678                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6679                 }
6680         }
6681 }
6682
6683 static void
6684 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6685 {
6686         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6687         int i;
6688
6689         for (i = 0; i < pf->vf_num; i++)
6690                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6691 }
6692
6693 static void
6694 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6695 {
6696         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6697         struct i40e_arq_event_info info;
6698         uint16_t pending, opcode;
6699         int ret;
6700
6701         info.buf_len = I40E_AQ_BUF_SZ;
6702         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6703         if (!info.msg_buf) {
6704                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6705                 return;
6706         }
6707
6708         pending = 1;
6709         while (pending) {
6710                 ret = i40e_clean_arq_element(hw, &info, &pending);
6711
6712                 if (ret != I40E_SUCCESS) {
6713                         PMD_DRV_LOG(INFO,
6714                                 "Failed to read msg from AdminQ, aq_err: %u",
6715                                 hw->aq.asq_last_status);
6716                         break;
6717                 }
6718                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6719
6720                 switch (opcode) {
6721                 case i40e_aqc_opc_send_msg_to_pf:
6722                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6723                         i40e_pf_host_handle_vf_msg(dev,
6724                                         rte_le_to_cpu_16(info.desc.retval),
6725                                         rte_le_to_cpu_32(info.desc.cookie_high),
6726                                         rte_le_to_cpu_32(info.desc.cookie_low),
6727                                         info.msg_buf,
6728                                         info.msg_len);
6729                         break;
6730                 case i40e_aqc_opc_get_link_status:
6731                         ret = i40e_dev_link_update(dev, 0);
6732                         if (!ret)
6733                                 rte_eth_dev_callback_process(dev,
6734                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6735                         break;
6736                 default:
6737                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6738                                     opcode);
6739                         break;
6740                 }
6741         }
6742         rte_free(info.msg_buf);
6743 }
6744
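/*
 * MDD (Malicious Driver Detection) flow: the global GL_MDET_TX/RX
 * registers report which function/queue triggered the event; the per-PF
 * (PF_MDET_*) and per-VF (VP_MDET_*) registers then identify whose
 * driver misbehaved. All of them are cleared by writing all-ones
 * (I40E_MDD_CLEAR32/CLEAR16 below).
 */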
6745 static void
6746 i40e_handle_mdd_event(struct rte_eth_dev *dev)
6747 {
6748 #define I40E_MDD_CLEAR32 0xFFFFFFFF
6749 #define I40E_MDD_CLEAR16 0xFFFF
6750         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6751         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6752         bool mdd_detected = false;
6753         struct i40e_pf_vf *vf;
6754         uint32_t reg;
6755         int i;
6756
6757         /* find what triggered the MDD event */
6758         reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
6759         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6760                 uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6761                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6762                 uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6763                                 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6764                 uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6765                                 I40E_GL_MDET_TX_EVENT_SHIFT;
6766                 uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6767                                 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6768                                         hw->func_caps.base_queue;
6769                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
6770                         "queue %d PF number 0x%02x VF number 0x%02x device %s\n",
6771                                 event, queue, pf_num, vf_num, dev->data->name);
6772                 I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
6773                 mdd_detected = true;
6774         }
6775         reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
6776         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6777                 uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6778                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6779                 uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6780                                 I40E_GL_MDET_RX_EVENT_SHIFT;
6781                 uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6782                                 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6783                                         hw->func_caps.base_queue;
6784
6785                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
6786                                 "queue %d of function 0x%02x device %s\n",
6787                                         event, queue, func, dev->data->name);
6788                 I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
6789                 mdd_detected = true;
6790         }
6791
6792         if (mdd_detected) {
6793                 reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
6794                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6795                         I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
6796                         PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n");
6797                 }
6798                 reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
6799                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6800                         I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
6801                                         I40E_MDD_CLEAR16);
6802                         PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n");
6803                 }
6804         }
6805
6806         /* see if one of the VFs needs its hand slapped */
6807         for (i = 0; i < pf->vf_num && mdd_detected; i++) {
6808                 vf = &pf->vfs[i];
6809                 reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
6810                 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6811                         I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
6812                                         I40E_MDD_CLEAR16);
6813                         vf->num_mdd_events++;
6814                         PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %"
6815                                         PRIu64 " times\n",
6816                                         i, vf->num_mdd_events);
6817                 }
6818
6819                 reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
6820                 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6821                         I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
6822                                         I40E_MDD_CLEAR16);
6823                         vf->num_mdd_events++;
6824                         PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %"
6825                                         PRIu64 " times\n",
6826                                         i, vf->num_mdd_events);
6827                 }
6828         }
6829 }
6830
6831 /**
6832  * Interrupt handler triggered by the NIC for handling
6833  * a specific interrupt.
6834  *
6835  * @param handle
6836  *  Pointer to interrupt handle.
6837  * @param param
6838  *  The address of parameter (struct rte_eth_dev *) registered before.
6839  *
6840  * @return
6841  *  void
6842  */
6843 static void
6844 i40e_dev_interrupt_handler(void *param)
6845 {
6846         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6847         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6848         uint32_t icr0;
6849
6850         /* Disable interrupt */
6851         i40e_pf_disable_irq0(hw);
6852
6853         /* read out interrupt causes */
6854         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6855
6856         /* No interrupt event indicated */
6857         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6858                 PMD_DRV_LOG(INFO, "No interrupt event");
6859                 goto done;
6860         }
6861         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6862                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6863         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6864                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6865                 i40e_handle_mdd_event(dev);
6866         }
6867         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6868                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6869         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6870                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6871         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6872                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6873         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6874                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6875         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6876                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6877
6878         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6879                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6880                 i40e_dev_handle_vfr_event(dev);
6881         }
6882         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6883                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6884                 i40e_dev_handle_aq_msg(dev);
6885         }
6886
6887 done:
6888         /* Enable interrupt */
6889         i40e_pf_enable_irq0(hw);
6890 }
6891
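/*
 * Polling-mode twin of i40e_dev_interrupt_handler(): it services the
 * same ICR0 causes but is driven by an EAL alarm instead of a hardware
 * interrupt, and therefore re-arms itself with rte_eal_alarm_set() at
 * the end of each pass.
 */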
6892 static void
6893 i40e_dev_alarm_handler(void *param)
6894 {
6895         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6896         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6897         uint32_t icr0;
6898
6899         /* Disable interrupt */
6900         i40e_pf_disable_irq0(hw);
6901
6902         /* read out interrupt causes */
6903         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6904
6905         /* No interrupt event indicated */
6906         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
6907                 goto done;
6908         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6909                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6910         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6911                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6912                 i40e_handle_mdd_event(dev);
6913         }
6914         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6915                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6916         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6917                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6918         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6919                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6920         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6921                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6922         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6923                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6924
6925         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6926                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6927                 i40e_dev_handle_vfr_event(dev);
6928         }
6929         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6930                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6931                 i40e_dev_handle_aq_msg(dev);
6932         }
6933
6934 done:
6935         /* Enable interrupt */
6936         i40e_pf_enable_irq0(hw);
6937         rte_eal_alarm_set(I40E_ALARM_INTERVAL,
6938                           i40e_dev_alarm_handler, dev);
6939 }
6940
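/*
 * The AdminQ can only take as many MAC/VLAN elements per command as fit
 * in one ASQ buffer, so the filter list is submitted in chunks of
 * ele_num = asq_buf_size / sizeof(element). As a purely illustrative
 * example, a 4096-byte buffer and 16-byte elements would give chunks of
 * 256 filters per i40e_aq_add_macvlan() call.
 */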
6941 int
6942 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6943                          struct i40e_macvlan_filter *filter,
6944                          int total)
6945 {
6946         int ele_num, ele_buff_size;
6947         int num, actual_num, i;
6948         uint16_t flags;
6949         int ret = I40E_SUCCESS;
6950         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6951         struct i40e_aqc_add_macvlan_element_data *req_list;
6952
6953         if (filter == NULL || total == 0)
6954                 return I40E_ERR_PARAM;
6955         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6956         ele_buff_size = hw->aq.asq_buf_size;
6957
6958         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6959         if (req_list == NULL) {
6960                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6961                 return I40E_ERR_NO_MEMORY;
6962         }
6963
6964         num = 0;
6965         do {
6966                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6967                 memset(req_list, 0, ele_buff_size);
6968
6969                 for (i = 0; i < actual_num; i++) {
6970                         rte_memcpy(req_list[i].mac_addr,
6971                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6972                         req_list[i].vlan_tag =
6973                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6974
6975                         switch (filter[num + i].filter_type) {
6976                         case I40E_MAC_PERFECT_MATCH:
6977                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6978                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6979                                 break;
6980                         case I40E_MACVLAN_PERFECT_MATCH:
6981                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6982                                 break;
6983                         case I40E_MAC_HASH_MATCH:
6984                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6985                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6986                                 break;
6987                         case I40E_MACVLAN_HASH_MATCH:
6988                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6989                                 break;
6990                         default:
6991                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
6992                                 ret = I40E_ERR_PARAM;
6993                                 goto DONE;
6994                         }
6995
6996                         req_list[i].queue_number = 0;
6997
6998                         req_list[i].flags = rte_cpu_to_le_16(flags);
6999                 }
7000
7001                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
7002                                                 actual_num, NULL);
7003                 if (ret != I40E_SUCCESS) {
7004                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
7005                         goto DONE;
7006                 }
7007                 num += actual_num;
7008         } while (num < total);
7009
7010 DONE:
7011         rte_free(req_list);
7012         return ret;
7013 }
7014
7015 int
7016 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
7017                             struct i40e_macvlan_filter *filter,
7018                             int total)
7019 {
7020         int ele_num, ele_buff_size;
7021         int num, actual_num, i;
7022         uint16_t flags;
7023         int ret = I40E_SUCCESS;
7024         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7025         struct i40e_aqc_remove_macvlan_element_data *req_list;
7026
7027         if (filter == NULL || total == 0)
7028                 return I40E_ERR_PARAM;
7029
7030         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7031         ele_buff_size = hw->aq.asq_buf_size;
7032
7033         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
7034         if (req_list == NULL) {
7035                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
7036                 return I40E_ERR_NO_MEMORY;
7037         }
7038
7039         num = 0;
7040         do {
7041                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7042                 memset(req_list, 0, ele_buff_size);
7043
7044                 for (i = 0; i < actual_num; i++) {
7045                         rte_memcpy(req_list[i].mac_addr,
7046                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
7047                         req_list[i].vlan_tag =
7048                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
7049
7050                         switch (filter[num + i].filter_type) {
7051                         case I40E_MAC_PERFECT_MATCH:
7052                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
7053                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7054                                 break;
7055                         case I40E_MACVLAN_PERFECT_MATCH:
7056                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7057                                 break;
7058                         case I40E_MAC_HASH_MATCH:
7059                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
7060                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7061                                 break;
7062                         case I40E_MACVLAN_HASH_MATCH:
7063                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
7064                                 break;
7065                         default:
7066                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
7067                                 ret = I40E_ERR_PARAM;
7068                                 goto DONE;
7069                         }
7070                         req_list[i].flags = rte_cpu_to_le_16(flags);
7071                 }
7072
7073                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
7074                                                 actual_num, NULL);
7075                 if (ret != I40E_SUCCESS) {
7076                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7077                         goto DONE;
7078                 }
7079                 num += actual_num;
7080         } while (num < total);
7081
7082 DONE:
7083         rte_free(req_list);
7084         return ret;
7085 }
7086
7087 /* Find a specific MAC filter */
7088 static struct i40e_mac_filter *
7089 i40e_find_mac_filter(struct i40e_vsi *vsi,
7090                          struct rte_ether_addr *macaddr)
7091 {
7092         struct i40e_mac_filter *f;
7093
7094         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7095                 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7096                         return f;
7097         }
7098
7099         return NULL;
7100 }
7101
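/*
 * The software VLAN table (vfta) is a bitmap with one bit per VLAN id;
 * I40E_VFTA_IDX() selects the 32-bit word and I40E_VFTA_BIT() the bit
 * within it (conventionally vlan_id >> 5 and 1 << (vlan_id & 0x1F)).
 * E.g. VLAN 100 lives in word 3, bit 4, assuming those definitions.
 */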
7102 static bool
7103 i40e_find_vlan_filter(struct i40e_vsi *vsi,
7104                          uint16_t vlan_id)
7105 {
7106         uint32_t vid_idx, vid_bit;
7107
7108         if (vlan_id > ETH_VLAN_ID_MAX)
7109                 return 0;
7110
7111         vid_idx = I40E_VFTA_IDX(vlan_id);
7112         vid_bit = I40E_VFTA_BIT(vlan_id);
7113
7114         if (vsi->vfta[vid_idx] & vid_bit)
7115                 return 1;
7116         else
7117                 return 0;
7118 }
7119
7120 static void
7121 i40e_store_vlan_filter(struct i40e_vsi *vsi,
7122                        uint16_t vlan_id, bool on)
7123 {
7124         uint32_t vid_idx, vid_bit;
7125
7126         vid_idx = I40E_VFTA_IDX(vlan_id);
7127         vid_bit = I40E_VFTA_BIT(vlan_id);
7128
7129         if (on)
7130                 vsi->vfta[vid_idx] |= vid_bit;
7131         else
7132                 vsi->vfta[vid_idx] &= ~vid_bit;
7133 }
7134
7135 void
7136 i40e_set_vlan_filter(struct i40e_vsi *vsi,
7137                      uint16_t vlan_id, bool on)
7138 {
7139         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7140         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
7141         int ret;
7142
7143         if (vlan_id > ETH_VLAN_ID_MAX)
7144                 return;
7145
7146         i40e_store_vlan_filter(vsi, vlan_id, on);
7147
7148         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
7149                 return;
7150
7151         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
7152
7153         if (on) {
7154                 ret = i40e_aq_add_vlan(hw, vsi->seid,
7155                                        &vlan_data, 1, NULL);
7156                 if (ret != I40E_SUCCESS)
7157                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
7158         } else {
7159                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
7160                                           &vlan_data, 1, NULL);
7161                 if (ret != I40E_SUCCESS)
7162                         PMD_DRV_LOG(ERR,
7163                                     "Failed to remove vlan filter");
7164         }
7165 }
7166
7167 /**
7168  * Find all vlans for a specific mac addr,
7169  * and return the actual vlans found.
7170  */
7171 int
7172 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7173                            struct i40e_macvlan_filter *mv_f,
7174                            int num, struct rte_ether_addr *addr)
7175 {
7176         int i;
7177         uint32_t j, k;
7178
7179         /**
7180          * i40e_find_vlan_filter() is deliberately not used here, to keep the
7181          * loop time down, even though the code looks more complex.
7182          */
7183         if (num < vsi->vlan_num)
7184                 return I40E_ERR_PARAM;
7185
7186         i = 0;
7187         for (j = 0; j < I40E_VFTA_SIZE; j++) {
7188                 if (vsi->vfta[j]) {
7189                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7190                                 if (vsi->vfta[j] & (1U << k)) {
7191                                         if (i > num - 1) {
7192                                                 PMD_DRV_LOG(ERR,
7193                                                         "vlan number doesn't match");
7194                                                 return I40E_ERR_PARAM;
7195                                         }
7196                                         rte_memcpy(&mv_f[i].macaddr,
7197                                                         addr, ETH_ADDR_LEN);
7198                                         mv_f[i].vlan_id =
7199                                                 j * I40E_UINT32_BIT_SIZE + k;
7200                                         i++;
7201                                 }
7202                         }
7203                 }
7204         }
7205         return I40E_SUCCESS;
7206 }
7207
7208 static inline int
7209 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7210                            struct i40e_macvlan_filter *mv_f,
7211                            int num,
7212                            uint16_t vlan)
7213 {
7214         int i = 0;
7215         struct i40e_mac_filter *f;
7216
7217         if (num < vsi->mac_num)
7218                 return I40E_ERR_PARAM;
7219
7220         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7221                 if (i > num - 1) {
7222                         PMD_DRV_LOG(ERR, "buffer number does not match");
7223                         return I40E_ERR_PARAM;
7224                 }
7225                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7226                                 ETH_ADDR_LEN);
7227                 mv_f[i].vlan_id = vlan;
7228                 mv_f[i].filter_type = f->mac_info.filter_type;
7229                 i++;
7230         }
7231
7232         return I40E_SUCCESS;
7233 }
7234
7235 static int
7236 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7237 {
7238         int i, j, num;
7239         struct i40e_mac_filter *f;
7240         struct i40e_macvlan_filter *mv_f;
7241         int ret = I40E_SUCCESS;
7242
7243         if (vsi == NULL || vsi->mac_num == 0)
7244                 return I40E_ERR_PARAM;
7245
7246         /* Case that no vlan is set */
7247         if (vsi->vlan_num == 0)
7248                 num = vsi->mac_num;
7249         else
7250                 num = vsi->mac_num * vsi->vlan_num;
7251
7252         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7253         if (mv_f == NULL) {
7254                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7255                 return I40E_ERR_NO_MEMORY;
7256         }
7257
7258         i = 0;
7259         if (vsi->vlan_num == 0) {
7260                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7261                         rte_memcpy(&mv_f[i].macaddr,
7262                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
7263                         mv_f[i].filter_type = f->mac_info.filter_type;
7264                         mv_f[i].vlan_id = 0;
7265                         i++;
7266                 }
7267         } else {
7268                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7269                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
7270                                         vsi->vlan_num, &f->mac_info.mac_addr);
7271                         if (ret != I40E_SUCCESS)
7272                                 goto DONE;
7273                         for (j = i; j < i + vsi->vlan_num; j++)
7274                                 mv_f[j].filter_type = f->mac_info.filter_type;
7275                         i += vsi->vlan_num;
7276                 }
7277         }
7278
7279         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7280 DONE:
7281         rte_free(mv_f);
7282
7283         return ret;
7284 }
7285
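/*
 * Hardware filters are (MAC, VLAN) pairs, so adding one VLAN fans out
 * into mac_num filter entries, one per MAC already on the VSI. When the
 * last VLAN is deleted below, every MAC is re-added with vlan 0 so that
 * untagged traffic keeps flowing.
 */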
7286 int
7287 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7288 {
7289         struct i40e_macvlan_filter *mv_f;
7290         int mac_num;
7291         int ret = I40E_SUCCESS;
7292
7293         if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7294                 return I40E_ERR_PARAM;
7295
7296         /* If it's already set, just return */
7297         if (i40e_find_vlan_filter(vsi, vlan))
7298                 return I40E_SUCCESS;
7299
7300         mac_num = vsi->mac_num;
7301
7302         if (mac_num == 0) {
7303                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7304                 return I40E_ERR_PARAM;
7305         }
7306
7307         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7308
7309         if (mv_f == NULL) {
7310                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7311                 return I40E_ERR_NO_MEMORY;
7312         }
7313
7314         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7315
7316         if (ret != I40E_SUCCESS)
7317                 goto DONE;
7318
7319         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7320
7321         if (ret != I40E_SUCCESS)
7322                 goto DONE;
7323
7324         i40e_set_vlan_filter(vsi, vlan, 1);
7325
7326         vsi->vlan_num++;
7327         ret = I40E_SUCCESS;
7328 DONE:
7329         rte_free(mv_f);
7330         return ret;
7331 }
7332
7333 int
7334 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7335 {
7336         struct i40e_macvlan_filter *mv_f;
7337         int mac_num;
7338         int ret = I40E_SUCCESS;
7339
7340         /**
7341          * Vlan 0 is the generic filter for untagged packets
7342          * and can't be removed.
7343          */
7344         if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
7345                 return I40E_ERR_PARAM;
7346
7347         /* If it can't be found, just return */
7348         if (!i40e_find_vlan_filter(vsi, vlan))
7349                 return I40E_ERR_PARAM;
7350
7351         mac_num = vsi->mac_num;
7352
7353         if (mac_num == 0) {
7354                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7355                 return I40E_ERR_PARAM;
7356         }
7357
7358         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7359
7360         if (mv_f == NULL) {
7361                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7362                 return I40E_ERR_NO_MEMORY;
7363         }
7364
7365         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7366
7367         if (ret != I40E_SUCCESS)
7368                 goto DONE;
7369
7370         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7371
7372         if (ret != I40E_SUCCESS)
7373                 goto DONE;
7374
7375         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
7376         if (vsi->vlan_num == 1) {
7377                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7378                 if (ret != I40E_SUCCESS)
7379                         goto DONE;
7380
7381                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7382                 if (ret != I40E_SUCCESS)
7383                         goto DONE;
7384         }
7385
7386         i40e_set_vlan_filter(vsi, vlan, 0);
7387
7388         vsi->vlan_num--;
7389         ret = I40E_SUCCESS;
7390 DONE:
7391         rte_free(mv_f);
7392         return ret;
7393 }
7394
7395 int
7396 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7397 {
7398         struct i40e_mac_filter *f;
7399         struct i40e_macvlan_filter *mv_f;
7400         int i, vlan_num = 0;
7401         int ret = I40E_SUCCESS;
7402
7403         /* If it has already been configured, just return */
7404         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7405         if (f != NULL)
7406                 return I40E_SUCCESS;
7407         if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7408                 mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7409
7410                 /**
7411                  * If vlan_num is 0, this is the first time a mac is added;
7412                  * set the mask for vlan_id 0.
7413                  */
7414                 if (vsi->vlan_num == 0) {
7415                         i40e_set_vlan_filter(vsi, 0, 1);
7416                         vsi->vlan_num = 1;
7417                 }
7418                 vlan_num = vsi->vlan_num;
7419         } else if (mac_filter->filter_type == I40E_MAC_PERFECT_MATCH ||
7420                         mac_filter->filter_type == I40E_MAC_HASH_MATCH)
7421                 vlan_num = 1;
7422
7423         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7424         if (mv_f == NULL) {
7425                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7426                 return I40E_ERR_NO_MEMORY;
7427         }
7428
7429         for (i = 0; i < vlan_num; i++) {
7430                 mv_f[i].filter_type = mac_filter->filter_type;
7431                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7432                                 ETH_ADDR_LEN);
7433         }
7434
7435         if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7436                 mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7437                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7438                                         &mac_filter->mac_addr);
7439                 if (ret != I40E_SUCCESS)
7440                         goto DONE;
7441         }
7442
7443         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7444         if (ret != I40E_SUCCESS)
7445                 goto DONE;
7446
7447         /* Add the mac addr into mac list */
7448         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7449         if (f == NULL) {
7450                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7451                 ret = I40E_ERR_NO_MEMORY;
7452                 goto DONE;
7453         }
7454         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7455                         ETH_ADDR_LEN);
7456         f->mac_info.filter_type = mac_filter->filter_type;
7457         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7458         vsi->mac_num++;
7459
7460         ret = I40E_SUCCESS;
7461 DONE:
7462         rte_free(mv_f);
7463
7464         return ret;
7465 }
7466
7467 int
7468 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
7469 {
7470         struct i40e_mac_filter *f;
7471         struct i40e_macvlan_filter *mv_f;
7472         int i, vlan_num;
7473         enum i40e_mac_filter_type filter_type;
7474         int ret = I40E_SUCCESS;
7475
7476         /* Can't find it, return an error */
7477         f = i40e_find_mac_filter(vsi, addr);
7478         if (f == NULL)
7479                 return I40E_ERR_PARAM;
7480
7481         vlan_num = vsi->vlan_num;
7482         filter_type = f->mac_info.filter_type;
7483         if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7484                 filter_type == I40E_MACVLAN_HASH_MATCH) {
7485                 if (vlan_num == 0) {
7486                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7487                         return I40E_ERR_PARAM;
7488                 }
7489         } else if (filter_type == I40E_MAC_PERFECT_MATCH ||
7490                         filter_type == I40E_MAC_HASH_MATCH)
7491                 vlan_num = 1;
7492
7493         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7494         if (mv_f == NULL) {
7495                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7496                 return I40E_ERR_NO_MEMORY;
7497         }
7498
7499         for (i = 0; i < vlan_num; i++) {
7500                 mv_f[i].filter_type = filter_type;
7501                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7502                                 ETH_ADDR_LEN);
7503         }
7504         if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7505                         filter_type == I40E_MACVLAN_HASH_MATCH) {
7506                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7507                 if (ret != I40E_SUCCESS)
7508                         goto DONE;
7509         }
7510
7511         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7512         if (ret != I40E_SUCCESS)
7513                 goto DONE;
7514
7515         /* Remove the mac addr from the mac list */
7516         TAILQ_REMOVE(&vsi->mac_list, f, next);
7517         rte_free(f);
7518         vsi->mac_num--;
7519
7520         ret = I40E_SUCCESS;
7521 DONE:
7522         rte_free(mv_f);
7523         return ret;
7524 }
7525
7526 /* Configure hash enable flags for RSS */
7527 uint64_t
7528 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7529 {
7530         uint64_t hena = 0;
7531         int i;
7532
7533         if (!flags)
7534                 return hena;
7535
7536         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7537                 if (flags & (1ULL << i))
7538                         hena |= adapter->pctypes_tbl[i];
7539         }
7540
7541         return hena;
7542 }
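/*
 * i40e_config_hena() and i40e_parse_hena() are inverses: one maps
 * RTE_ETH_FLOW_* bits to hardware packet-classifier-type (PCTYPE) bits
 * via pctypes_tbl[], the other maps PCTYPE bits back to flow-type bits.
 * E.g. a set RTE_ETH_FLOW_NONFRAG_IPV4_TCP bit contributes
 * pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] to the HENA value.
 */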
7543
7544 /* Parse the hash enable flags */
7545 uint64_t
7546 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7547 {
7548         uint64_t rss_hf = 0;
7549         int i;
7550
7551         if (!flags)
7552                 return rss_hf;
7553
7554         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7555                 if (flags & adapter->pctypes_tbl[i])
7556                         rss_hf |= (1ULL << i);
7557         }
7558         return rss_hf;
7559 }
7560
7561 /* Disable RSS */
7562 void
7563 i40e_pf_disable_rss(struct i40e_pf *pf)
7564 {
7565         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7566
7567         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7568         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7569         I40E_WRITE_FLUSH(hw);
7570 }
7571
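/*
 * The RSS key is programmed either through the AdminQ (on devices with
 * I40E_FLAG_RSS_AQ_CAPABLE) or by writing the HKEY register array
 * directly. The expected key length is (HKEY_MAX_INDEX + 1) * 4 bytes;
 * with the usual max index of 12 that is a 52-byte key.
 */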
7572 int
7573 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7574 {
7575         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7576         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7577         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7578                            I40E_VFQF_HKEY_MAX_INDEX :
7579                            I40E_PFQF_HKEY_MAX_INDEX;
7580
7581         if (!key || key_len == 0) {
7582                 PMD_DRV_LOG(DEBUG, "No key to be configured");
7583                 return 0;
7584         } else if (key_len != (key_idx + 1) *
7585                 sizeof(uint32_t)) {
7586                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7587                 return -EINVAL;
7588         }
7589
7590         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7591                 struct i40e_aqc_get_set_rss_key_data *key_dw =
7592                                 (struct i40e_aqc_get_set_rss_key_data *)key;
7593                 enum i40e_status_code status =
7594                                 i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7595
7596                 if (status) {
7597                         PMD_DRV_LOG(ERR,
7598                                     "Failed to configure RSS key via AQ, error status: %d",
7599                                     status);
7600                         return -EIO;
7601                 }
7602         } else {
7603                 uint32_t *hash_key = (uint32_t *)key;
7604                 uint16_t i;
7605
7606                 if (vsi->type == I40E_VSI_SRIOV) {
7607                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7608                                 I40E_WRITE_REG(
7609                                         hw,
7610                                         I40E_VFQF_HKEY1(i, vsi->user_param),
7611                                         hash_key[i]);
7612
7613                 } else {
7614                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7615                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7616                                                hash_key[i]);
7617                 }
7618                 I40E_WRITE_FLUSH(hw);
7619         }
7620
7621         return 0;
7622 }
7623
7624 static int
7625 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7626 {
7627         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7628         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7629         uint32_t reg;
7630         int ret;
7631
7632         if (!key || !key_len)
7633                 return 0;
7634
7635         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7636                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7637                         (struct i40e_aqc_get_set_rss_key_data *)key);
7638                 if (ret) {
7639                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7640                         return ret;
7641                 }
7642         } else {
7643                 uint32_t *key_dw = (uint32_t *)key;
7644                 uint16_t i;
7645
7646                 if (vsi->type == I40E_VSI_SRIOV) {
7647                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7648                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7649                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7650                         }
7651                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7652                                    sizeof(uint32_t);
7653                 } else {
7654                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7655                                 reg = I40E_PFQF_HKEY(i);
7656                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7657                         }
7658                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7659                                    sizeof(uint32_t);
7660                 }
7661         }
7662         return 0;
7663 }
7664
7665 static int
7666 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7667 {
7668         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7669         uint64_t hena;
7670         int ret;
7671
7672         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7673                                rss_conf->rss_key_len);
7674         if (ret)
7675                 return ret;
7676
7677         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7678         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7679         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7680         I40E_WRITE_FLUSH(hw);
7681
7682         return 0;
7683 }
7684
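/*
 * Note the update semantics: this callback can only change the hash
 * functions while RSS stays in its current state. Turning RSS on when
 * HENA is all-zero, or off when it is non-zero, is rejected with
 * -EINVAL; RSS itself is enabled or disabled through the device's
 * mq_mode configuration instead.
 */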
7685 static int
7686 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7687                          struct rte_eth_rss_conf *rss_conf)
7688 {
7689         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7690         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7691         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7692         uint64_t hena;
7693
7694         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7695         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7696
7697         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7698                 if (rss_hf != 0) /* Enable RSS */
7699                         return -EINVAL;
7700                 return 0; /* Nothing to do */
7701         }
7702         /* RSS enabled */
7703         if (rss_hf == 0) /* Disable RSS */
7704                 return -EINVAL;
7705
7706         return i40e_hw_rss_hash_set(pf, rss_conf);
7707 }
7708
7709 static int
7710 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7711                            struct rte_eth_rss_conf *rss_conf)
7712 {
7713         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7714         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7715         uint64_t hena;
7716         int ret;
7717
7718         if (!rss_conf)
7719                 return -EINVAL;
7720
7721         ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7722                          &rss_conf->rss_key_len);
7723         if (ret)
7724                 return ret;
7725
7726         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7727         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7728         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7729
7730         return 0;
7731 }
7732
7733 static int
7734 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7735 {
7736         switch (filter_type) {
7737         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7738                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7739                 break;
7740         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7741                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7742                 break;
7743         case RTE_TUNNEL_FILTER_IMAC_TENID:
7744                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7745                 break;
7746         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7747                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7748                 break;
7749         case ETH_TUNNEL_FILTER_IMAC:
7750                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7751                 break;
7752         case ETH_TUNNEL_FILTER_OIP:
7753                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7754                 break;
7755         case ETH_TUNNEL_FILTER_IIP:
7756                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7757                 break;
7758         default:
7759                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7760                 return -EINVAL;
7761         }
7762
7763         return 0;
7764 }
7765
7766 /* Convert tunnel filter structure */
7767 static int
7768 i40e_tunnel_filter_convert(
7769         struct i40e_aqc_cloud_filters_element_bb *cld_filter,
7770         struct i40e_tunnel_filter *tunnel_filter)
7771 {
7772         rte_ether_addr_copy((struct rte_ether_addr *)
7773                         &cld_filter->element.outer_mac,
7774                 (struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
7775         rte_ether_addr_copy((struct rte_ether_addr *)
7776                         &cld_filter->element.inner_mac,
7777                 (struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
7778         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7779         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7780              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7781             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7782                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7783         else
7784                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7785         tunnel_filter->input.flags = cld_filter->element.flags;
7786         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7787         tunnel_filter->queue = cld_filter->element.queue_number;
7788         rte_memcpy(tunnel_filter->input.general_fields,
7789                    cld_filter->general_fields,
7790                    sizeof(cld_filter->general_fields));
7791
7792         return 0;
7793 }
7794
7795 /* Check if there exists the tunnel filter */
7796 struct i40e_tunnel_filter *
7797 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7798                              const struct i40e_tunnel_filter_input *input)
7799 {
7800         int ret;
7801
7802         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7803         if (ret < 0)
7804                 return NULL;
7805
7806         return tunnel_rule->hash_map[ret];
7807 }
7808
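/*
 * The SW tunnel-filter store pairs an rte_hash with a flat hash_map[]
 * array: rte_hash_add_key()/rte_hash_lookup() return a non-negative key
 * index on success, which is reused directly as the hash_map slot
 * holding the filter pointer.
 */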
7809 /* Add a tunnel filter into the SW list */
7810 static int
7811 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7812                              struct i40e_tunnel_filter *tunnel_filter)
7813 {
7814         struct i40e_tunnel_rule *rule = &pf->tunnel;
7815         int ret;
7816
7817         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7818         if (ret < 0) {
7819                 PMD_DRV_LOG(ERR,
7820                             "Failed to insert tunnel filter into hash table %d!",
7821                             ret);
7822                 return ret;
7823         }
7824         rule->hash_map[ret] = tunnel_filter;
7825
7826         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7827
7828         return 0;
7829 }
7830
7831 /* Delete a tunnel filter from the SW list */
7832 int
7833 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7834                           struct i40e_tunnel_filter_input *input)
7835 {
7836         struct i40e_tunnel_rule *rule = &pf->tunnel;
7837         struct i40e_tunnel_filter *tunnel_filter;
7838         int ret;
7839
7840         ret = rte_hash_del_key(rule->hash_table, input);
7841         if (ret < 0) {
7842                 PMD_DRV_LOG(ERR,
7843                             "Failed to delete tunnel filter from hash table %d!",
7844                             ret);
7845                 return ret;
7846         }
7847         tunnel_filter = rule->hash_map[ret];
7848         rule->hash_map[ret] = NULL;
7849
7850         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7851         rte_free(tunnel_filter);
7852
7853         return 0;
7854 }
7855
7856 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7857 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7858 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7859 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7860 #define I40E_TR_GRE_KEY_MASK                    0x400
7861 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7862 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7863 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
7864 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
7865 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
7866 #define I40E_DIRECTION_INGRESS_KEY              0x8000
7867 #define I40E_TR_L4_TYPE_TCP                     0x2
7868 #define I40E_TR_L4_TYPE_UDP                     0x4
7869 #define I40E_TR_L4_TYPE_SCTP                    0x8
7870
7871 static enum
7872 i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7873 {
7874         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7875         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7876         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7877         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7878         enum i40e_status_code status = I40E_SUCCESS;
7879
7880         if (pf->support_multi_driver) {
7881                 PMD_DRV_LOG(ERR, "Replacing the l1 filter is not supported.");
7882                 return I40E_NOT_SUPPORTED;
7883         }
7884
7885         memset(&filter_replace, 0,
7886                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7887         memset(&filter_replace_buf, 0,
7888                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7889
7890         /* create L1 filter */
7891         filter_replace.old_filter_type =
7892                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7893         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7894         filter_replace.tr_bit = 0;
7895
7896         /* Prepare the buffer, 3 entries */
7897         filter_replace_buf.data[0] =
7898                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7899         filter_replace_buf.data[0] |=
7900                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7901         filter_replace_buf.data[2] = 0xFF;
7902         filter_replace_buf.data[3] = 0xFF;
7903         filter_replace_buf.data[4] =
7904                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7905         filter_replace_buf.data[4] |=
7906                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7907         filter_replace_buf.data[7] = 0xF0;
7908         filter_replace_buf.data[8]
7909                 = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7910         filter_replace_buf.data[8] |=
7911                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
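             /* data[10] and data[11] carry the low and high byte of the
              * 16-bit tunnel-key type mask word.
              */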
7912         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7913                 I40E_TR_GENEVE_KEY_MASK |
7914                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7915         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7916                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7917                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7918
7919         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7920                                                &filter_replace_buf);
7921         if (!status && (filter_replace.old_filter_type !=
7922                         filter_replace.new_filter_type))
7923                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
7924                             " original: 0x%x, new: 0x%x",
7925                             dev->device->name,
7926                             filter_replace.old_filter_type,
7927                             filter_replace.new_filter_type);
7928
7929         return status;
7930 }
7931
7932 static enum i40e_status_code
7933 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7934 {
7935         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7936         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7937         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7938         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7939         enum i40e_status_code status = I40E_SUCCESS;
7940
7941         if (pf->support_multi_driver) {
7942                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7943                 return I40E_NOT_SUPPORTED;
7944         }
7945
7946         /* For MPLSoUDP */
7947         memset(&filter_replace, 0,
7948                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7949         memset(&filter_replace_buf, 0,
7950                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7951         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7952                 I40E_AQC_MIRROR_CLOUD_FILTER;
7953         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7954         filter_replace.new_filter_type =
7955                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7956         /* Prepare the buffer, 2 entries */
7957         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7958         filter_replace_buf.data[0] |=
7959                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7960         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7961         filter_replace_buf.data[4] |=
7962                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7963         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7964                                                &filter_replace_buf);
7965         if (status < 0)
7966                 return status;
7967         if (filter_replace.old_filter_type !=
7968             filter_replace.new_filter_type)
7969                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7970                             " original: 0x%x, new: 0x%x",
7971                             dev->device->name,
7972                             filter_replace.old_filter_type,
7973                             filter_replace.new_filter_type);
7974
7975         /* For MPLSoGRE */
7976         memset(&filter_replace, 0,
7977                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7978         memset(&filter_replace_buf, 0,
7979                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7980
7981         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7982                 I40E_AQC_MIRROR_CLOUD_FILTER;
7983         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7984         filter_replace.new_filter_type =
7985                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7986         /* Prepare the buffer, 2 entries */
7987         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7988         filter_replace_buf.data[0] |=
7989                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7990         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7991         filter_replace_buf.data[4] |=
7992                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7993
7994         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7995                                                &filter_replace_buf);
7996         if (!status && (filter_replace.old_filter_type !=
7997                         filter_replace.new_filter_type))
7998                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7999                             " original: 0x%x, new: 0x%x",
8000                             dev->device->name,
8001                             filter_replace.old_filter_type,
8002                             filter_replace.new_filter_type);
8003
8004         return status;
8005 }
8006
8007 static enum i40e_status_code
8008 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
8009 {
8010         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8011         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8012         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8013         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8014         enum i40e_status_code status = I40E_SUCCESS;
8015
8016         if (pf->support_multi_driver) {
8017                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8018                 return I40E_NOT_SUPPORTED;
8019         }
8020
8021         /* For GTP-C */
8022         memset(&filter_replace, 0,
8023                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8024         memset(&filter_replace_buf, 0,
8025                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8026         /* create L1 filter */
8027         filter_replace.old_filter_type =
8028                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8029         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
8030         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
8031                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8032         /* Prepare the buffer, 2 entries */
8033         filter_replace_buf.data[0] =
8034                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8035         filter_replace_buf.data[0] |=
8036                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8037         filter_replace_buf.data[2] = 0xFF;
8038         filter_replace_buf.data[3] = 0xFF;
8039         filter_replace_buf.data[4] =
8040                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8041         filter_replace_buf.data[4] |=
8042                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8043         filter_replace_buf.data[6] = 0xFF;
8044         filter_replace_buf.data[7] = 0xFF;
8045         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8046                                                &filter_replace_buf);
8047         if (status < 0)
8048                 return status;
8049         if (filter_replace.old_filter_type !=
8050             filter_replace.new_filter_type)
8051                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8052                             " original: 0x%x, new: 0x%x",
8053                             dev->device->name,
8054                             filter_replace.old_filter_type,
8055                             filter_replace.new_filter_type);
8056
8057         /* for GTP-U */
8058         memset(&filter_replace, 0,
8059                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8060         memset(&filter_replace_buf, 0,
8061                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8062         /* create L1 filter */
8063         filter_replace.old_filter_type =
8064                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8065         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
8066         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
8067                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8068         /* Prepare the buffer, 2 entries */
8069         filter_replace_buf.data[0] =
8070                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8071         filter_replace_buf.data[0] |=
8072                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8073         filter_replace_buf.data[2] = 0xFF;
8074         filter_replace_buf.data[3] = 0xFF;
8075         filter_replace_buf.data[4] =
8076                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8077         filter_replace_buf.data[4] |=
8078                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8079         filter_replace_buf.data[6] = 0xFF;
8080         filter_replace_buf.data[7] = 0xFF;
8081
8082         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8083                                                &filter_replace_buf);
8084         if (!status && (filter_replace.old_filter_type !=
8085                         filter_replace.new_filter_type))
8086                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8087                             " original: 0x%x, new: 0x%x",
8088                             dev->device->name,
8089                             filter_replace.old_filter_type,
8090                             filter_replace.new_filter_type);
8091
8092         return status;
8093 }
8094
8095 static enum i40e_status_code
8096 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
8097 {
8098         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8099         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8100         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8101         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8102         enum i40e_status_code status = I40E_SUCCESS;
8103
8104         if (pf->support_multi_driver) {
8105                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8106                 return I40E_NOT_SUPPORTED;
8107         }
8108
8109         /* for GTP-C */
8110         memset(&filter_replace, 0,
8111                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8112         memset(&filter_replace_buf, 0,
8113                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8114         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8115         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
8116         filter_replace.new_filter_type =
8117                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8118         /* Prepare the buffer, 2 entries */
8119         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
8120         filter_replace_buf.data[0] |=
8121                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8122         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8123         filter_replace_buf.data[4] |=
8124                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8125         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8126                                                &filter_replace_buf);
8127         if (status < 0)
8128                 return status;
8129         if (filter_replace.old_filter_type !=
8130             filter_replace.new_filter_type)
8131                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8132                             " original: 0x%x, new: 0x%x",
8133                             dev->device->name,
8134                             filter_replace.old_filter_type,
8135                             filter_replace.new_filter_type);
8136
8137         /* for GTP-U */
8138         memset(&filter_replace, 0,
8139                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8140         memset(&filter_replace_buf, 0,
8141                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8142         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8143         filter_replace.old_filter_type =
8144                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
8145         filter_replace.new_filter_type =
8146                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8147         /* Prepare the buffer, 2 entries */
8148         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
8149         filter_replace_buf.data[0] |=
8150                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8151         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8152         filter_replace_buf.data[4] |=
8153                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8154
8155         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8156                                                &filter_replace_buf);
8157         if (!status && (filter_replace.old_filter_type !=
8158                         filter_replace.new_filter_type))
8159                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8160                             " original: 0x%x, new: 0x%x",
8161                             dev->device->name,
8162                             filter_replace.old_filter_type,
8163                             filter_replace.new_filter_type);
8164
8165         return status;
8166 }
8167
8168 static enum i40e_status_code
8169 i40e_replace_port_l1_filter(struct i40e_pf *pf,
8170                             enum i40e_l4_port_type l4_port_type)
8171 {
8172         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8173         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8174         enum i40e_status_code status = I40E_SUCCESS;
8175         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8176         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8177
8178         if (pf->support_multi_driver) {
8179                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8180                 return I40E_NOT_SUPPORTED;
8181         }
8182
8183         memset(&filter_replace, 0,
8184                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8185         memset(&filter_replace_buf, 0,
8186                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8187
8188         /* create L1 filter */
8189         if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8190                 filter_replace.old_filter_type =
8191                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8192                 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8193                 filter_replace_buf.data[8] =
8194                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
8195         } else {
8196                 filter_replace.old_filter_type =
8197                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
8198                 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
8199                 filter_replace_buf.data[8] =
8200                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
8201         }
8202
8203         filter_replace.tr_bit = 0;
8204         /* Prepare the buffer, 3 entries */
8205         filter_replace_buf.data[0] =
8206                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
8207         filter_replace_buf.data[0] |=
8208                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8209         filter_replace_buf.data[2] = 0x00;
8210         filter_replace_buf.data[3] =
8211                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
8212         filter_replace_buf.data[4] =
8213                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
8214         filter_replace_buf.data[4] |=
8215                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8216         filter_replace_buf.data[5] = 0x00;
8217         filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
8218                 I40E_TR_L4_TYPE_TCP |
8219                 I40E_TR_L4_TYPE_SCTP;
8220         filter_replace_buf.data[7] = 0x00;
8221         filter_replace_buf.data[8] |=
8222                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8223         filter_replace_buf.data[9] = 0x00;
8224         filter_replace_buf.data[10] = 0xFF;
8225         filter_replace_buf.data[11] = 0xFF;
8226
8227         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8228                                                &filter_replace_buf);
8229         if (!status && filter_replace.old_filter_type !=
8230             filter_replace.new_filter_type)
8231                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8232                             " original: 0x%x, new: 0x%x",
8233                             dev->device->name,
8234                             filter_replace.old_filter_type,
8235                             filter_replace.new_filter_type);
8236
8237         return status;
8238 }
8239
8240 static enum i40e_status_code
8241 i40e_replace_port_cloud_filter(struct i40e_pf *pf,
8242                                enum i40e_l4_port_type l4_port_type)
8243 {
8244         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8245         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8246         enum i40e_status_code status = I40E_SUCCESS;
8247         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8248         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8249
8250         if (pf->support_multi_driver) {
8251                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8252                 return I40E_NOT_SUPPORTED;
8253         }
8254
8255         memset(&filter_replace, 0,
8256                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8257         memset(&filter_replace_buf, 0,
8258                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8259
8260         if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8261                 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8262                 filter_replace.new_filter_type =
8263                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8264                 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
8265         } else {
8266                 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
8267                 filter_replace.new_filter_type =
8268                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
8269                 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
8270         }
8271
8272         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8273         filter_replace.tr_bit = 0;
8274         /* Prepare the buffer, 2 entries */
8275         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8276         filter_replace_buf.data[0] |=
8277                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8278         filter_replace_buf.data[4] |=
8279                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8280         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8281                                                &filter_replace_buf);
8282
8283         if (!status && filter_replace.old_filter_type !=
8284             filter_replace.new_filter_type)
8285                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8286                             " original: 0x%x, new: 0x%x",
8287                             dev->device->name,
8288                             filter_replace.old_filter_type,
8289                             filter_replace.new_filter_type);
8290
8291         return status;
8292 }
8293
8294 int
8295 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
8296                       struct i40e_tunnel_filter_conf *tunnel_filter,
8297                       uint8_t add)
8298 {
8299         uint16_t ip_type;
8300         uint32_t ipv4_addr, ipv4_addr_le;
8301         uint8_t i, tun_type = 0;
8302         /* internal variable to convert ipv6 byte order */
8303         uint32_t convert_ipv6[4];
8304         int val, ret = 0;
8305         struct i40e_pf_vf *vf = NULL;
8306         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8307         struct i40e_vsi *vsi;
8308         struct i40e_aqc_cloud_filters_element_bb *cld_filter;
8309         struct i40e_aqc_cloud_filters_element_bb *pfilter;
8310         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
8311         struct i40e_tunnel_filter *tunnel, *node;
8312         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
8313         uint32_t teid_le;
8314         bool big_buffer = 0;
8315
8316         cld_filter = rte_zmalloc("tunnel_filter",
8317                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8318                          0);
8319
8320         if (cld_filter == NULL) {
8321                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8322                 return -ENOMEM;
8323         }
8324         pfilter = cld_filter;
8325
8326         rte_ether_addr_copy(&tunnel_filter->outer_mac,
8327                         (struct rte_ether_addr *)&pfilter->element.outer_mac);
8328         rte_ether_addr_copy(&tunnel_filter->inner_mac,
8329                         (struct rte_ether_addr *)&pfilter->element.inner_mac);
8330
8331         pfilter->element.inner_vlan =
8332                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8333         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
8334                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8335                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8336                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8337                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
8338                                 &ipv4_addr_le,
8339                                 sizeof(pfilter->element.ipaddr.v4.data));
8340         } else {
8341                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8342                 for (i = 0; i < 4; i++) {
8343                         convert_ipv6[i] =
8344                         rte_cpu_to_le_32(rte_be_to_cpu_32(
8345                                          tunnel_filter->ip_addr.ipv6_addr[i]));
8346                 }
8347                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
8348                            &convert_ipv6,
8349                            sizeof(pfilter->element.ipaddr.v6.data));
8350         }
8351
8352         /* check tunneled type */
8353         switch (tunnel_filter->tunnel_type) {
8354         case I40E_TUNNEL_TYPE_VXLAN:
8355                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8356                 break;
8357         case I40E_TUNNEL_TYPE_NVGRE:
8358                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8359                 break;
8360         case I40E_TUNNEL_TYPE_IP_IN_GRE:
8361                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8362                 break;
8363         case I40E_TUNNEL_TYPE_MPLSoUDP:
8364                 if (!pf->mpls_replace_flag) {
8365                         i40e_replace_mpls_l1_filter(pf);
8366                         i40e_replace_mpls_cloud_filter(pf);
8367                         pf->mpls_replace_flag = 1;
8368                 }
8369                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
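                     /* Split the 20-bit MPLS label across two 16-bit
                      * field-vector words: the upper 16 bits go in WORD0 and
                      * the low nibble in the top bits of WORD1; WORD2 is 0x40
                      * here, apparently marking the UDP encapsulation (the
                      * MPLSoGRE case below uses 0x0).
                      */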
8370                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8371                         teid_le >> 4;
8372                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8373                         (teid_le & 0xF) << 12;
8374                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8375                         0x40;
8376                 big_buffer = 1;
8377                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
8378                 break;
8379         case I40E_TUNNEL_TYPE_MPLSoGRE:
8380                 if (!pf->mpls_replace_flag) {
8381                         i40e_replace_mpls_l1_filter(pf);
8382                         i40e_replace_mpls_cloud_filter(pf);
8383                         pf->mpls_replace_flag = 1;
8384                 }
8385                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8386                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8387                         teid_le >> 4;
8388                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8389                         (teid_le & 0xF) << 12;
8390                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8391                         0x0;
8392                 big_buffer = 1;
8393                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
8394                 break;
8395         case I40E_TUNNEL_TYPE_GTPC:
8396                 if (!pf->gtp_replace_flag) {
8397                         i40e_replace_gtp_l1_filter(pf);
8398                         i40e_replace_gtp_cloud_filter(pf);
8399                         pf->gtp_replace_flag = 1;
8400                 }
8401                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
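                     /* The 32-bit GTP TEID is split into two 16-bit
                      * field-vector words.
                      */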
8402                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
8403                         (teid_le >> 16) & 0xFFFF;
8404                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
8405                         teid_le & 0xFFFF;
8406                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
8407                         0x0;
8408                 big_buffer = 1;
8409                 break;
8410         case I40E_TUNNEL_TYPE_GTPU:
8411                 if (!pf->gtp_replace_flag) {
8412                         i40e_replace_gtp_l1_filter(pf);
8413                         i40e_replace_gtp_cloud_filter(pf);
8414                         pf->gtp_replace_flag = 1;
8415                 }
8416                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8417                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
8418                         (teid_le >> 16) & 0xFFFF;
8419                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
8420                         teid_le & 0xFFFF;
8421                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
8422                         0x0;
8423                 big_buffer = 1;
8424                 break;
8425         case I40E_TUNNEL_TYPE_QINQ:
8426                 if (!pf->qinq_replace_flag) {
8427                         ret = i40e_cloud_filter_qinq_create(pf);
8428                         if (ret < 0)
8429                                 PMD_DRV_LOG(DEBUG,
8430                                             "QinQ tunnel filter already created.");
8431                         pf->qinq_replace_flag = 1;
8432                 }
8433         /* Store the values of the outer and inner VLAN
8434          * in the general fields.
8435          * The big buffer flag must be set; see the handling
8436          * in i40e_aq_add_cloud_filters.
8437          */
8438                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
8439                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
8440                 big_buffer = 1;
8441                 break;
8442         case I40E_CLOUD_TYPE_UDP:
8443         case I40E_CLOUD_TYPE_TCP:
8444         case I40E_CLOUD_TYPE_SCTP:
8445                 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8446                         if (!pf->sport_replace_flag) {
8447                                 i40e_replace_port_l1_filter(pf,
8448                                                 tunnel_filter->l4_port_type);
8449                                 i40e_replace_port_cloud_filter(pf,
8450                                                 tunnel_filter->l4_port_type);
8451                                 pf->sport_replace_flag = 1;
8452                         }
8453                         teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
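                             /* FV words: WORD0 = ingress direction key,
                              * WORD1 = L4 protocol type, WORD2 = upper 16
                              * bits of the tenant ID.
                              */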
8454                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8455                                 I40E_DIRECTION_INGRESS_KEY;
8456
8457                         if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8458                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8459                                         I40E_TR_L4_TYPE_UDP;
8460                         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8461                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8462                                         I40E_TR_L4_TYPE_TCP;
8463                         else
8464                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8465                                         I40E_TR_L4_TYPE_SCTP;
8466
8467                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8468                                 (teid_le >> 16) & 0xFFFF;
8469                         big_buffer = 1;
8470                 } else {
8471                         if (!pf->dport_replace_flag) {
8472                                 i40e_replace_port_l1_filter(pf,
8473                                                 tunnel_filter->l4_port_type);
8474                                 i40e_replace_port_cloud_filter(pf,
8475                                                 tunnel_filter->l4_port_type);
8476                                 pf->dport_replace_flag = 1;
8477                         }
8478                         teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8479                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
8480                                 I40E_DIRECTION_INGRESS_KEY;
8481
8482                         if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8483                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8484                                         I40E_TR_L4_TYPE_UDP;
8485                         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8486                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8487                                         I40E_TR_L4_TYPE_TCP;
8488                         else
8489                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8490                                         I40E_TR_L4_TYPE_SCTP;
8491
8492                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
8493                                 (teid_le >> 16) & 0xFFFF;
8494                         big_buffer = 1;
8495                 }
8496
8497                 break;
8498         default:
8499                 /* Other tunnel types are not supported. */
8500                 PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
8501                 rte_free(cld_filter);
8502                 return -EINVAL;
8503         }
8504
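             /* Select the (replaced) cloud filter type ID for the tunnel. */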
8505         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
8506                 pfilter->element.flags =
8507                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8508         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
8509                 pfilter->element.flags =
8510                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
8511         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
8512                 pfilter->element.flags =
8513                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8514         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
8515                 pfilter->element.flags =
8516                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
8517         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
8518                 pfilter->element.flags |=
8519                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
8520         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
8521                  tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
8522                  tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
8523                 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
8524                         pfilter->element.flags |=
8525                                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8526                 else
8527                         pfilter->element.flags |=
8528                                 I40E_AQC_ADD_CLOUD_FILTER_0X10;
8529         } else {
8530                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8531                                                 &pfilter->element.flags);
8532                 if (val < 0) {
8533                         rte_free(cld_filter);
8534                         return -EINVAL;
8535                 }
8536         }
8537
8538         pfilter->element.flags |= rte_cpu_to_le_16(
8539                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8540                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8541         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8542         pfilter->element.queue_number =
8543                 rte_cpu_to_le_16(tunnel_filter->queue_id);
8544
8545         if (!tunnel_filter->is_to_vf) {
8546                 vsi = pf->main_vsi;
8547         } else {
8548                 if (tunnel_filter->vf_id >= pf->vf_num) {
8549                         PMD_DRV_LOG(ERR, "Invalid argument.");
8550                         rte_free(cld_filter);
8551                         return -EINVAL;
8552                 }
8553                 vf = &pf->vfs[tunnel_filter->vf_id];
8554                 vsi = vf->vsi;
8555         }
8556
8557         /* Check if there is the filter in SW list */
8558         memset(&check_filter, 0, sizeof(check_filter));
8559         i40e_tunnel_filter_convert(cld_filter, &check_filter);
8560         check_filter.is_to_vf = tunnel_filter->is_to_vf;
8561         check_filter.vf_id = tunnel_filter->vf_id;
8562         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8563         if (add && node) {
8564                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8565                 rte_free(cld_filter);
8566                 return -EINVAL;
8567         }
8568
8569         if (!add && !node) {
8570                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8571                 rte_free(cld_filter);
8572                 return -EINVAL;
8573         }
8574
8575         if (add) {
8576                 if (big_buffer)
8577                         ret = i40e_aq_add_cloud_filters_bb(hw,
8578                                                    vsi->seid, cld_filter, 1);
8579                 else
8580                         ret = i40e_aq_add_cloud_filters(hw,
8581                                         vsi->seid, &cld_filter->element, 1);
8582                 if (ret < 0) {
8583                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8584                         rte_free(cld_filter);
8585                         return -ENOTSUP;
8586                 }
8587                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8588                 if (tunnel == NULL) {
8589                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8590                         rte_free(cld_filter);
8591                         return -ENOMEM;
8592                 }
8593
8594                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8595                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8596                 if (ret < 0)
8597                         rte_free(tunnel);
8598         } else {
8599                 if (big_buffer)
8600                         ret = i40e_aq_rem_cloud_filters_bb(
8601                                 hw, vsi->seid, cld_filter, 1);
8602                 else
8603                         ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8604                                                 &cld_filter->element, 1);
8605                 if (ret < 0) {
8606                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8607                         rte_free(cld_filter);
8608                         return -ENOTSUP;
8609                 }
8610                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8611         }
8612
8613         rte_free(cld_filter);
8614         return ret;
8615 }
8616
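     /* Return the index of a configured VXLAN UDP port, or -1 if absent. */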
8617 static int
8618 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8619 {
8620         uint8_t i;
8621
8622         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8623                 if (pf->vxlan_ports[i] == port)
8624                         return i;
8625         }
8626
8627         return -1;
8628 }
8629
8630 static int
8631 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
8632 {
8633         int  idx, ret;
8634         uint8_t filter_idx = 0;
8635         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8636
8637         idx = i40e_get_vxlan_port_idx(pf, port);
8638
8639         /* Check if port already exists */
8640         if (idx >= 0) {
8641                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8642                 return -EINVAL;
8643         }
8644
8645         /* Now check if there is space to add the new port */
8646         idx = i40e_get_vxlan_port_idx(pf, 0);
8647         if (idx < 0) {
8648                 PMD_DRV_LOG(ERR,
8649                         "Maximum number of UDP ports reached, not adding port %d",
8650                         port);
8651                 return -ENOSPC;
8652         }
8653
8654         ret = i40e_aq_add_udp_tunnel(hw, port, udp_type,
8655                                         &filter_idx, NULL);
8656         if (ret < 0) {
8657                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8658                 return -1;
8659         }
8660
8661         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
8662                          port, filter_idx);
8663
8664         /* New port: add it and mark its index in the bitmap */
8665         pf->vxlan_ports[idx] = port;
8666         pf->vxlan_bitmap |= (1 << idx);
8667
8668         if (!(pf->flags & I40E_FLAG_VXLAN))
8669                 pf->flags |= I40E_FLAG_VXLAN;
8670
8671         return 0;
8672 }
8673
8674 static int
8675 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8676 {
8677         int idx;
8678         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8679
8680         if (!(pf->flags & I40E_FLAG_VXLAN)) {
8681                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8682                 return -EINVAL;
8683         }
8684
8685         idx = i40e_get_vxlan_port_idx(pf, port);
8686
8687         if (idx < 0) {
8688                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8689                 return -EINVAL;
8690         }
8691
8692         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8693                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8694                 return -1;
8695         }
8696
8697         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
8698                         port, idx);
8699
8700         pf->vxlan_ports[idx] = 0;
8701         pf->vxlan_bitmap &= ~(1 << idx);
8702
8703         if (!pf->vxlan_bitmap)
8704                 pf->flags &= ~I40E_FLAG_VXLAN;
8705
8706         return 0;
8707 }
8708
8709 /* Add UDP tunneling port */
8710 static int
8711 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8712                              struct rte_eth_udp_tunnel *udp_tunnel)
8713 {
8714         int ret = 0;
8715         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8716
8717         if (udp_tunnel == NULL)
8718                 return -EINVAL;
8719
8720         switch (udp_tunnel->prot_type) {
8721         case RTE_TUNNEL_TYPE_VXLAN:
8722                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8723                                           I40E_AQC_TUNNEL_TYPE_VXLAN);
8724                 break;
8725         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8726                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8727                                           I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
8728                 break;
8729         case RTE_TUNNEL_TYPE_GENEVE:
8730         case RTE_TUNNEL_TYPE_TEREDO:
8731                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8732                 ret = -1;
8733                 break;
8734
8735         default:
8736                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8737                 ret = -1;
8738                 break;
8739         }
8740
8741         return ret;
8742 }
8743
8744 /* Remove UDP tunneling port */
8745 static int
8746 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8747                              struct rte_eth_udp_tunnel *udp_tunnel)
8748 {
8749         int ret = 0;
8750         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8751
8752         if (udp_tunnel == NULL)
8753                 return -EINVAL;
8754
8755         switch (udp_tunnel->prot_type) {
8756         case RTE_TUNNEL_TYPE_VXLAN:
8757         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8758                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8759                 break;
8760         case RTE_TUNNEL_TYPE_GENEVE:
8761         case RTE_TUNNEL_TYPE_TEREDO:
8762                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8763                 ret = -1;
8764                 break;
8765         default:
8766                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8767                 ret = -1;
8768                 break;
8769         }
8770
8771         return ret;
8772 }
8773
8774 /* Calculate the maximum number of contiguous PF queues that are configured */
8775 int
8776 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8777 {
8778         struct rte_eth_dev_data *data = pf->dev_data;
8779         int i, num;
8780         struct i40e_rx_queue *rxq;
8781
8782         num = 0;
8783         for (i = 0; i < pf->lan_nb_qps; i++) {
8784                 rxq = data->rx_queues[i];
8785                 if (rxq && rxq->q_set)
8786                         num++;
8787                 else
8788                         break;
8789         }
8790
8791         return num;
8792 }
8793
8794 /* Reset the global configure of hash function and input sets */
8795 static void
8796 i40e_pf_global_rss_reset(struct i40e_pf *pf)
8797 {
8798         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8799         uint32_t reg, reg_val;
8800         int i;
8801
8802         /* Reset global RSS function sets */
8803         reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
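             /* HTOEP selects the Toeplitz hash function; set it if cleared. */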
8804         if (!(reg_val & I40E_GLQF_CTL_HTOEP_MASK)) {
8805                 reg_val |= I40E_GLQF_CTL_HTOEP_MASK;
8806                 i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg_val);
8807         }
8808
8809         for (i = 0; i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) {
8810                 uint64_t inset;
8811                 int j, pctype;
8812
8813                 if (hw->mac.type == I40E_MAC_X722)
8814                         pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(i));
8815                 else
8816                         pctype = i;
8817
8818                 /* Reset pctype insets */
8819                 inset = i40e_get_default_input_set(i);
8820                 if (inset) {
8821                         pf->hash_input_set[pctype] = inset;
8822                         inset = i40e_translate_input_set_reg(hw->mac.type,
8823                                                              inset);
8824
8825                         reg = I40E_GLQF_HASH_INSET(0, pctype);
8826                         i40e_check_write_global_reg(hw, reg, (uint32_t)inset);
8827                         reg = I40E_GLQF_HASH_INSET(1, pctype);
8828                         i40e_check_write_global_reg(hw, reg,
8829                                                     (uint32_t)(inset >> 32));
8830
8831                         /* Clear unused mask registers of the pctype */
8832                         for (j = 0; j < I40E_INSET_MASK_NUM_REG; j++) {
8833                                 reg = I40E_GLQF_HASH_MSK(j, pctype);
8834                                 i40e_check_write_global_reg(hw, reg, 0);
8835                         }
8836                 }
8837
8838                 /* Reset pctype symmetric sets */
8839                 reg = I40E_GLQF_HSYM(pctype);
8840                 reg_val = i40e_read_rx_ctl(hw, reg);
8841                 if (reg_val & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8842                         reg_val &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
8843                         i40e_write_global_rx_ctl(hw, reg, reg_val);
8844                 }
8845         }
8846         I40E_WRITE_FLUSH(hw);
8847 }
8848
8849 int
8850 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
8851 {
8852         struct i40e_hw *hw = &pf->adapter->hw;
8853         uint8_t lut[ETH_RSS_RETA_SIZE_512];
8854         uint32_t i;
8855         int num;
8856
8857         /* If both VMDQ and RSS are enabled, not all PF queues are
8858          * configured. It's necessary to calculate the actual number
8859          * of configured PF queues.
8860          */
8861         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8862                 num = i40e_pf_calc_configured_queues_num(pf);
8863         else
8864                 num = pf->dev_data->nb_rx_queues;
8865
8866         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8867         if (num <= 0)
8868                 return 0;
8869
8870         for (i = 0; i < hw->func_caps.rss_table_size; i++)
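             /* Fill the LUT round-robin across the configured queues. */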
8871                 lut[i] = (uint8_t)(i % (uint32_t)num);
8872
8873         return i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
8874 }
8875
8876 int
8877 i40e_pf_reset_rss_key(struct i40e_pf *pf)
8878 {
8879         const uint8_t key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8880                         sizeof(uint32_t);
8881         uint8_t *rss_key;
8882
8883         /* Reset key */
8884         rss_key = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key;
8885         if (!rss_key ||
8886             pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key_len < key_len) {
8887                 static uint32_t rss_key_default[] = {0x6b793944,
8888                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8889                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8890                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8891
8892                 rss_key = (uint8_t *)rss_key_default;
8893         }
8894
8895         return i40e_set_rss_key(pf->main_vsi, rss_key, key_len);
8896 }
8897
8898 static int
8899 i40e_pf_rss_reset(struct i40e_pf *pf)
8900 {
8901         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8902
8903         int ret;
8904
8905         pf->hash_filter_enabled = 0;
8906         i40e_pf_disable_rss(pf);
8907         i40e_set_symmetric_hash_enable_per_port(hw, 0);
8908
8909         if (!pf->support_multi_driver)
8910                 i40e_pf_global_rss_reset(pf);
8911
8912         /* Reset RETA table */
8913         if (pf->adapter->rss_reta_updated == 0) {
8914                 ret = i40e_pf_reset_rss_reta(pf);
8915                 if (ret)
8916                         return ret;
8917         }
8918
8919         return i40e_pf_reset_rss_key(pf);
8920 }
8921
8922 /* Configure RSS */
8923 int
8924 i40e_pf_config_rss(struct i40e_pf *pf)
8925 {
8926         struct i40e_hw *hw;
8927         enum rte_eth_rx_mq_mode mq_mode;
8928         uint64_t rss_hf, hena;
8929         int ret;
8930
8931         ret = i40e_pf_rss_reset(pf);
8932         if (ret) {
8933                 PMD_DRV_LOG(ERR, "Reset RSS failed, RSS has been disabled");
8934                 return ret;
8935         }
8936
8937         rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
8938         mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8939         if (!(rss_hf & pf->adapter->flow_types_mask) ||
8940             !(mq_mode & ETH_MQ_RX_RSS_FLAG))
8941                 return 0;
8942
8943         hw = I40E_PF_TO_HW(pf);
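             /* Translate the rss_hf flow types into PCTYPE enable bits. */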
8944         hena = i40e_config_hena(pf->adapter, rss_hf);
8945         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
8946         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
8947         I40E_WRITE_FLUSH(hw);
8948
8949         return 0;
8950 }
8951
8952 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8953 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
8954 int
8955 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8956 {
8957         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8958         uint32_t val, reg;
8959         int ret = -EINVAL;
8960
8961         if (pf->support_multi_driver) {
8962                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8963                 return -ENOTSUP;
8964         }
8965
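             /* The MSK_ENA bit selects 3-byte GRE key matching; clearing it
              * restores the default 4-byte key.
              */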
8966         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8967         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8968
8969         if (len == 3) {
8970                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8971         } else if (len == 4) {
8972                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8973         } else {
8974                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8975                 return ret;
8976         }
8977
8978         if (reg != val) {
8979                 ret = i40e_aq_debug_write_global_register(hw,
8980                                                    I40E_GL_PRS_FVBM(2),
8981                                                    reg, NULL);
8982                 if (ret != 0)
8983                         return ret;
8984                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x was changed "
8985                             "to value 0x%08x",
8986                             I40E_GL_PRS_FVBM(2), reg);
8987         } else {
8988                 ret = 0;
8989         }
8990         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8991                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8992
8993         return ret;
8994 }
8995
8996 /* Set the symmetric hash enable configurations per port */
8997 void
8998 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8999 {
9000         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9001
9002         if (enable > 0) {
9003                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)
9004                         return;
9005
9006                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9007         } else {
9008                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK))
9009                         return;
9010
9011                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9012         }
9013         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
9014         I40E_WRITE_FLUSH(hw);
9015 }
9016
9017 /**
9018  * Valid input sets for hash and flow director filters per PCTYPE
9019  */
9020 static uint64_t
9021 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
9022                 enum rte_filter_type filter)
9023 {
9024         uint64_t valid;
9025
9026         static const uint64_t valid_hash_inset_table[] = {
9027                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9028                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9029                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9030                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
9031                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
9032                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9033                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9034                         I40E_INSET_FLEX_PAYLOAD,
9035                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9036                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9037                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9038                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9039                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9040                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9041                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9042                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9043                         I40E_INSET_FLEX_PAYLOAD,
9044                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9045                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9046                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9047                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9048                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9049                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9050                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9051                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9052                         I40E_INSET_FLEX_PAYLOAD,
9053                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9054                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9055                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9056                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9057                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9058                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9059                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9060                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9061                         I40E_INSET_FLEX_PAYLOAD,
9062                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9063                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9064                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9065                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9066                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9067                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9068                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9069                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9070                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9071                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9072                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9073                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9074                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9075                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9076                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9077                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9078                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9079                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9080                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9081                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9082                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9083                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9084                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9085                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9086                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9087                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9088                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9089                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9090                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9091                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9092                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9093                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9094                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9095                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9096                         I40E_INSET_FLEX_PAYLOAD,
9097                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9098                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9099                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9100                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9101                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9102                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9103                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9104                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9105                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9106                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9107                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9108                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9109                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9110                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9111                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9112                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9113                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9114                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9115                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9116                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9117                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9118                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9119                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9120                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9121                         I40E_INSET_FLEX_PAYLOAD,
9122                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9123                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9124                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9125                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9126                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9127                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9128                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9129                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9130                         I40E_INSET_FLEX_PAYLOAD,
9131                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9132                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9133                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9134                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9135                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9136                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9137                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9138                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9139                         I40E_INSET_FLEX_PAYLOAD,
9140                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9141                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9142                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9143                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9144                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9145                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9146                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9147                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9148                         I40E_INSET_FLEX_PAYLOAD,
9149                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9150                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9151                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9152                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9153                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9154                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9155                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9156                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9157                         I40E_INSET_FLEX_PAYLOAD,
9158                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9159                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9160                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9161                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9162                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9163                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9164                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9165                         I40E_INSET_FLEX_PAYLOAD,
9166                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9167                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9168                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9169                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9170                         I40E_INSET_FLEX_PAYLOAD,
9171         };
9172
9173         /**
9174          * Flow director supports only fields defined in
9175          * union rte_eth_fdir_flow.
9176          */
9177         static const uint64_t valid_fdir_inset_table[] = {
9178                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9179                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9180                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9181                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9182                 I40E_INSET_IPV4_TTL,
9183                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9184                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9185                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9186                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9187                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9188                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9189                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9190                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9191                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9192                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9193                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9194                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9195                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9196                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9197                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9198                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9199                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9200                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9201                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9202                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9203                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9204                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9205                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9206                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9207                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9208                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9209                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9210                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9211                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9212                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9213                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9214                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9215                 I40E_INSET_SCTP_VT,
9216                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9217                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9218                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9219                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9220                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9221                 I40E_INSET_IPV4_TTL,
9222                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9223                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9224                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9225                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9226                 I40E_INSET_IPV6_HOP_LIMIT,
9227                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9228                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9229                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9230                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9231                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9232                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9233                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9234                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9235                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9236                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9237                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9238                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9239                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9240                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9241                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9242                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9243                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9244                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9245                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9246                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9247                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9248                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9249                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9250                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9251                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9252                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9253                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9254                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9255                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9256                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9257                 I40E_INSET_SCTP_VT,
9258                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9259                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9260                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9261                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9262                 I40E_INSET_IPV6_HOP_LIMIT,
9263                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9264                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9265                 I40E_INSET_LAST_ETHER_TYPE,
9266         };
9267
9268         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9269                 return 0;
9270         if (filter == RTE_ETH_FILTER_HASH)
9271                 valid = valid_hash_inset_table[pctype];
9272         else
9273                 valid = valid_fdir_inset_table[pctype];
9274
9275         return valid;
9276 }
9277
9278 /**
9279  * Validate if the input set is allowed for a specific PCTYPE
9280  */
9281 int
9282 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9283                 enum rte_filter_type filter, uint64_t inset)
9284 {
9285         uint64_t valid;
9286
9287         valid = i40e_get_valid_input_set(pctype, filter);
9288         if (inset & (~valid))
9289                 return -EINVAL;
9290
9291         return 0;
9292 }
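
/*
 * Illustrative use (not from the driver): a caller would typically build an
 * input set from I40E_INSET_* bits and validate it before programming, e.g.:
 *
 *	uint64_t inset = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 *			 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT;
 *	if (i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
 *				    RTE_ETH_FILTER_FDIR, inset))
 *		return -EINVAL;
 */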
9293
9294 /* default input set fields combination per pctype */
9295 uint64_t
9296 i40e_get_default_input_set(uint16_t pctype)
9297 {
9298         static const uint64_t default_inset_table[] = {
9299                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9300                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9301                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9302                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9303                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9304                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9305                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9306                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9307                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9308                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9309                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9310                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9311                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9312                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9313                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9314                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9315                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9316                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9317                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9318                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9319                         I40E_INSET_SCTP_VT,
9320                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9321                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9322                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9323                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9324                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9325                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9326                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9327                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9328                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9329                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9330                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9331                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9332                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9333                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9334                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9335                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9336                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9337                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9338                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9339                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9340                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9341                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9342                         I40E_INSET_SCTP_VT,
9343                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9344                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9345                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9346                         I40E_INSET_LAST_ETHER_TYPE,
9347         };
9348
9349         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9350                 return 0;
9351
9352         return default_inset_table[pctype];
9353 }
9354
9355 /**
9356  * Translate the input set from abstract bit masks to register-aware
9357  * bit masks
9358  */
9359 uint64_t
9360 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9361 {
9362         uint64_t val = 0;
9363         uint16_t i;
9364
9365         struct inset_map {
9366                 uint64_t inset;
9367                 uint64_t inset_reg;
9368         };
9369
9370         static const struct inset_map inset_map_common[] = {
9371                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9372                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9373                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9374                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9375                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9376                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9377                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9378                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9379                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9380                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9381                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9382                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9383                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9384                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9385                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9386                 {I40E_INSET_TUNNEL_DMAC,
9387                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9388                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9389                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9390                 {I40E_INSET_TUNNEL_SRC_PORT,
9391                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9392                 {I40E_INSET_TUNNEL_DST_PORT,
9393                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9394                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9395                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9396                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9397                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9398                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9399                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9400                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9401                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9402                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9403         };
9404
9405         /* Some registers are mapped differently on X722 */
9406         static const struct inset_map inset_map_diff_x722[] = {
9407                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9408                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9409                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9410                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9411         };
9412
9413         static const struct inset_map inset_map_diff_not_x722[] = {
9414                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9415                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9416                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9417                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9418         };
9419
9420         if (input == 0)
9421                 return val;
9422
9423         /* Translate input set to register aware inset */
9424         if (type == I40E_MAC_X722) {
9425                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9426                         if (input & inset_map_diff_x722[i].inset)
9427                                 val |= inset_map_diff_x722[i].inset_reg;
9428                 }
9429         } else {
9430                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9431                         if (input & inset_map_diff_not_x722[i].inset)
9432                                 val |= inset_map_diff_not_x722[i].inset_reg;
9433                 }
9434         }
9435
9436         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9437                 if (input & inset_map_common[i].inset)
9438                         val |= inset_map_common[i].inset_reg;
9439         }
9440
9441         return val;
9442 }
9443
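/*
 * Find the destination offset for a header field by scanning the GLQF_PIT
 * registers in [pit_reg_start, pit_reg_start + pit_reg_count). Returns the
 * translated destination offset, or -1 if no PIT entry covers the field.
 */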
9444 static int
9445 i40e_get_inset_field_offset(struct i40e_hw *hw, uint32_t pit_reg_start,
9446                             uint32_t pit_reg_count, uint32_t hdr_off)
9447 {
9448         const uint32_t pit_reg_end = pit_reg_start + pit_reg_count;
9449         uint32_t field_off = I40E_FDIR_FIELD_OFFSET(hdr_off);
9450         uint32_t i, reg_val, src_off, count;
9451
9452         for (i = pit_reg_start; i < pit_reg_end; i++) {
9453                 reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_PIT(i));
9454
9455                 src_off = I40E_GLQF_PIT_SOURCE_OFF_GET(reg_val);
9456                 count = I40E_GLQF_PIT_FSIZE_GET(reg_val);
9457
9458                 if (src_off <= field_off && (src_off + count) > field_off)
9459                         break;
9460         }
9461
9462         if (i >= pit_reg_end) {
9463                 PMD_DRV_LOG(ERR,
9464                             "Hardware GLQF_PIT configuration does not support this field mask");
9465                 return -1;
9466         }
9467
9468         return I40E_GLQF_PIT_DEST_OFF_GET(reg_val) + field_off - src_off;
9469 }
9470
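/*
 * Build the field-mask register values required by an input set. Fills up
 * to nb_elem entries of 'mask' and returns the number of entries used, or
 * a negative value on failure. Combinations that need no mask at all
 * (e.g. PROTO together with TTL) are skipped.
 */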
9471 int
9472 i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset,
9473                              uint32_t *mask, uint8_t nb_elem)
9474 {
9475         static const uint64_t mask_inset[] = {
9476                 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL,
9477                 I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT };
9478
9479         static const struct {
9480                 uint64_t inset;
9481                 uint32_t mask;
9482                 uint32_t offset;
9483         } inset_mask_offset_map[] = {
9484                 { I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK,
9485                   offsetof(struct rte_ipv4_hdr, type_of_service) },
9486
9487                 { I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK,
9488                   offsetof(struct rte_ipv4_hdr, next_proto_id) },
9489
9490                 { I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK,
9491                   offsetof(struct rte_ipv4_hdr, time_to_live) },
9492
9493                 { I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK,
9494                   offsetof(struct rte_ipv6_hdr, vtc_flow) },
9495
9496                 { I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK,
9497                   offsetof(struct rte_ipv6_hdr, proto) },
9498
9499                 { I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK,
9500                   offsetof(struct rte_ipv6_hdr, hop_limits) },
9501         };
9502
9503         uint32_t i;
9504         int idx = 0;
9505
9506         assert(mask);
9507         if (!inset)
9508                 return 0;
9509
9510         for (i = 0; i < RTE_DIM(mask_inset); i++) {
9511                 /* Clear the inset bits that need no mask register,
9512                  * for example when both PROTO and TTL are present
9513                  */
9514                 if ((mask_inset[i] & inset) == mask_inset[i]) {
9515                         inset &= ~mask_inset[i];
9516                         if (!inset)
9517                                 return 0;
9518                 }
9519         }
9520
9521         for (i = 0; i < RTE_DIM(inset_mask_offset_map); i++) {
9522                 uint32_t pit_start, pit_count;
9523                 int offset;
9524
9525                 if (!(inset_mask_offset_map[i].inset & inset))
9526                         continue;
9527
9528                 if (inset_mask_offset_map[i].inset &
9529                     (I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9530                      I40E_INSET_IPV4_TTL)) {
9531                         pit_start = I40E_GLQF_PIT_IPV4_START;
9532                         pit_count = I40E_GLQF_PIT_IPV4_COUNT;
9533                 } else {
9534                         pit_start = I40E_GLQF_PIT_IPV6_START;
9535                         pit_count = I40E_GLQF_PIT_IPV6_COUNT;
9536                 }
9537
9538                 offset = i40e_get_inset_field_offset(hw, pit_start, pit_count,
9539                                 inset_mask_offset_map[i].offset);
9540
9541                 if (offset < 0)
9542                         return -EINVAL;
9543
9544                 if (idx >= nb_elem) {
9545                         PMD_DRV_LOG(ERR,
9546                                     "Configuration of inset mask out of range %u",
9547                                     nb_elem);
9548                         return -ERANGE;
9549                 }
9550
9551                 mask[idx] = I40E_GLQF_PIT_BUILD((uint32_t)offset,
9552                                                 inset_mask_offset_map[i].mask);
9553                 idx++;
9554         }
9555
9556         return idx;
9557 }
9558
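/* Write an RX control register only when the value actually changes */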
9559 void
9560 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9561 {
9562         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9563
9564         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9565         if (reg != val)
9566                 i40e_write_rx_ctl(hw, addr, val);
9567         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9568                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9569 }
9570
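/*
 * Write a global RX control register only when the value changes, and log
 * a warning since global registers are shared by all ports of the device.
 */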
9571 void
9572 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9573 {
9574         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9575         struct rte_eth_dev *dev;
9576
9577         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
9578         if (reg != val) {
9579                 i40e_write_rx_ctl(hw, addr, val);
9580                 PMD_DRV_LOG(WARNING,
9581                             "i40e device %s changed global register [0x%08x]."
9582                             " original: 0x%08x, new: 0x%08x",
9583                             dev->device->name, addr, reg,
9584                             (uint32_t)i40e_read_rx_ctl(hw, addr));
9585         }
9586 }
9587
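/*
 * Program the default input set of every supported pctype into the flow
 * director (and, unless multi-driver support is enabled, the hash) input
 * set and mask registers, then cache the defaults in the PF structure.
 */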
9588 static void
9589 i40e_filter_input_set_init(struct i40e_pf *pf)
9590 {
9591         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9592         enum i40e_filter_pctype pctype;
9593         uint64_t input_set, inset_reg;
9594         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9595         int num, i;
9596         uint16_t flow_type;
9597
9598         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9599              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9600                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9601
9602                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9603                         continue;
9604
9605                 input_set = i40e_get_default_input_set(pctype);
9606
9607                 num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9608                                                    I40E_INSET_MASK_NUM_REG);
9609                 if (num < 0)
9610                         return;
9611                 if (pf->support_multi_driver && num > 0) {
9612                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9613                         return;
9614                 }
9615                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9616                                         input_set);
9617
9618                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9619                                       (uint32_t)(inset_reg & UINT32_MAX));
9620                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9621                                      (uint32_t)((inset_reg >>
9622                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
9623                 if (!pf->support_multi_driver) {
9624                         i40e_check_write_global_reg(hw,
9625                                             I40E_GLQF_HASH_INSET(0, pctype),
9626                                             (uint32_t)(inset_reg & UINT32_MAX));
9627                         i40e_check_write_global_reg(hw,
9628                                              I40E_GLQF_HASH_INSET(1, pctype),
9629                                              (uint32_t)((inset_reg >>
9630                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
9631
9632                         for (i = 0; i < num; i++) {
9633                                 i40e_check_write_global_reg(hw,
9634                                                     I40E_GLQF_FD_MSK(i, pctype),
9635                                                     mask_reg[i]);
9636                                 i40e_check_write_global_reg(hw,
9637                                                   I40E_GLQF_HASH_MSK(i, pctype),
9638                                                   mask_reg[i]);
9639                         }
9640                         /* Clear unused mask registers of the pctype */
9641                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9642                                 i40e_check_write_global_reg(hw,
9643                                                     I40E_GLQF_FD_MSK(i, pctype),
9644                                                     0);
9645                                 i40e_check_write_global_reg(hw,
9646                                                   I40E_GLQF_HASH_MSK(i, pctype),
9647                                                   0);
9648                         }
9649                 } else {
9650                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9651                 }
9652                 I40E_WRITE_FLUSH(hw);
9653
9654                 /* store the default input set */
9655                 if (!pf->support_multi_driver)
9656                         pf->hash_input_set[pctype] = input_set;
9657                 pf->fdir.input_set[pctype] = input_set;
9658         }
9659 }
9660
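/*
 * Configure the hash input set of a pctype. When 'add' is set, the new
 * fields are OR-ed into the input set currently programmed in hardware
 * instead of replacing it.
 */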
9661 int
9662 i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
9663                     uint32_t pctype, bool add)
9664 {
9665         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9666         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9667         uint64_t inset_reg = 0;
9668         int num, i;
9669
9670         if (pf->support_multi_driver) {
9671                 PMD_DRV_LOG(ERR,
9672                             "Modifying the input set is not permitted when multi-driver is enabled.");
9673                 return -EPERM;
9674         }
9675
9676         /* For X722, get translated pctype in fd pctype register */
9677         if (hw->mac.type == I40E_MAC_X722)
9678                 pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
9679
9680         if (add) {
9681                 /* get inset value in register */
9682                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9683                 inset_reg <<= I40E_32_BIT_WIDTH;
9684                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9685                 input_set |= pf->hash_input_set[pctype];
9686         }
9687         num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9688                                            I40E_INSET_MASK_NUM_REG);
9689         if (num < 0)
9690                 return -EINVAL;
9691
9692         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9693
9694         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9695                                     (uint32_t)(inset_reg & UINT32_MAX));
9696         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9697                                     (uint32_t)((inset_reg >>
9698                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
9699
9700         for (i = 0; i < num; i++)
9701                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9702                                             mask_reg[i]);
9703         /* Clear unused mask registers of the pctype */
9704         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9705                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9706                                             0);
9707         I40E_WRITE_FLUSH(hw);
9708
9709         pf->hash_input_set[pctype] = input_set;
9710         return 0;
9711 }
9712
9713 /* Convert ethertype filter structure */
9714 static int
9715 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9716                               struct i40e_ethertype_filter *filter)
9717 {
9718         rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
9719                 RTE_ETHER_ADDR_LEN);
9720         filter->input.ether_type = input->ether_type;
9721         filter->flags = input->flags;
9722         filter->queue = input->queue;
9723
9724         return 0;
9725 }
9726
9727 /* Check if the ethertype filter already exists */
9728 struct i40e_ethertype_filter *
9729 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9730                                 const struct i40e_ethertype_filter_input *input)
9731 {
9732         int ret;
9733
9734         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9735         if (ret < 0)
9736                 return NULL;
9737
9738         return ethertype_rule->hash_map[ret];
9739 }
9740
9741 /* Add an ethertype filter to the SW list */
9742 static int
9743 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9744                                 struct i40e_ethertype_filter *filter)
9745 {
9746         struct i40e_ethertype_rule *rule = &pf->ethertype;
9747         int ret;
9748
9749         ret = rte_hash_add_key(rule->hash_table, &filter->input);
9750         if (ret < 0) {
9751                 PMD_DRV_LOG(ERR,
9752                             "Failed to insert ethertype filter"
9753                             " into hash table: %d!",
9754                             ret);
9755                 return ret;
9756         }
9757         rule->hash_map[ret] = filter;
9758
9759         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9760
9761         return 0;
9762 }
9763
9764 /* Delete an ethertype filter from the SW list */
9765 int
9766 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9767                              struct i40e_ethertype_filter_input *input)
9768 {
9769         struct i40e_ethertype_rule *rule = &pf->ethertype;
9770         struct i40e_ethertype_filter *filter;
9771         int ret;
9772
9773         ret = rte_hash_del_key(rule->hash_table, input);
9774         if (ret < 0) {
9775                 PMD_DRV_LOG(ERR,
9776                             "Failed to delete ethertype filter"
9777                             " from hash table: %d!",
9778                             ret);
9779                 return ret;
9780         }
9781         filter = rule->hash_map[ret];
9782         rule->hash_map[ret] = NULL;
9783
9784         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9785         rte_free(filter);
9786
9787         return 0;
9788 }
9789
9790 /*
9791  * Configure an ethertype filter, which can direct packets by filtering
9792  * on MAC address and ether_type, or on ether_type alone
9793  */
9794 int
9795 i40e_ethertype_filter_set(struct i40e_pf *pf,
9796                         struct rte_eth_ethertype_filter *filter,
9797                         bool add)
9798 {
9799         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9800         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9801         struct i40e_ethertype_filter *ethertype_filter, *node;
9802         struct i40e_ethertype_filter check_filter;
9803         struct i40e_control_filter_stats stats;
9804         uint16_t flags = 0;
9805         int ret;
9806
9807         if (filter->queue >= pf->dev_data->nb_rx_queues) {
9808                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9809                 return -EINVAL;
9810         }
9811         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
9812                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
9813                 PMD_DRV_LOG(ERR,
9814                         "unsupported ether_type(0x%04x) in control packet filter.",
9815                         filter->ether_type);
9816                 return -EINVAL;
9817         }
9818         if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
9819                 PMD_DRV_LOG(WARNING,
9820                         "filtering on VLAN ether_type in the first tag is not supported.");
9821
9822         /* Check if the filter exists in the SW list */
9823         memset(&check_filter, 0, sizeof(check_filter));
9824         i40e_ethertype_filter_convert(filter, &check_filter);
9825         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9826                                                &check_filter.input);
9827         if (add && node) {
9828                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9829                 return -EINVAL;
9830         }
9831
9832         if (!add && !node) {
9833                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9834                 return -EINVAL;
9835         }
9836
9837         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9838                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9839         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9840                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9841         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9842
9843         memset(&stats, 0, sizeof(stats));
9844         ret = i40e_aq_add_rem_control_packet_filter(hw,
9845                         filter->mac_addr.addr_bytes,
9846                         filter->ether_type, flags,
9847                         pf->main_vsi->seid,
9848                         filter->queue, add, &stats, NULL);
9849
9850         PMD_DRV_LOG(INFO,
9851                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9852                 ret, stats.mac_etype_used, stats.etype_used,
9853                 stats.mac_etype_free, stats.etype_free);
9854         if (ret < 0)
9855                 return -ENOSYS;
9856
9857         /* Add or delete a filter in SW list */
9858         if (add) {
9859                 ethertype_filter = rte_zmalloc("ethertype_filter",
9860                                        sizeof(*ethertype_filter), 0);
9861                 if (ethertype_filter == NULL) {
9862                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9863                         return -ENOMEM;
9864                 }
9865
9866                 rte_memcpy(ethertype_filter, &check_filter,
9867                            sizeof(check_filter));
9868                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9869                 if (ret < 0)
9870                         rte_free(ethertype_filter);
9871         } else {
9872                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9873         }
9874
9875         return ret;
9876 }
9877
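/* Expose the rte_flow ops implemented by this driver */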
9878 static int
9879 i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
9880                       const struct rte_flow_ops **ops)
9881 {
9882         if (dev == NULL)
9883                 return -EINVAL;
9884
9885         *ops = &i40e_flow_ops;
9886         return 0;
9887 }
9888
9889 /*
9890  * Check and enable Extended Tag.
9891  * Enabling Extended Tag is important for 40G performance.
9892  */
9893 static void
9894 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9895 {
9896         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9897         uint32_t buf = 0;
9898         int ret;
9899
9900         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9901                                       PCI_DEV_CAP_REG);
9902         if (ret < 0) {
9903                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9904                             PCI_DEV_CAP_REG);
9905                 return;
9906         }
9907         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9908                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9909                 return;
9910         }
9911
9912         buf = 0;
9913         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9914                                       PCI_DEV_CTRL_REG);
9915         if (ret < 0) {
9916                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9917                             PCI_DEV_CTRL_REG);
9918                 return;
9919         }
9920         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9921                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9922                 return;
9923         }
9924         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9925         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9926                                        PCI_DEV_CTRL_REG);
9927         if (ret < 0) {
9928                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9929                             PCI_DEV_CTRL_REG);
9930                 return;
9931         }
9932 }
9933
9934 /*
9935  * As some registers are not reset without a global hardware reset,
9936  * hardware initialization is needed to put those registers into an
9937  * expected initial state.
9938  */
9939 static void
9940 i40e_hw_init(struct rte_eth_dev *dev)
9941 {
9942         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9943
9944         i40e_enable_extended_tag(dev);
9945
9946         /* clear the PF Queue Filter control register */
9947         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9948
9949         /* Disable symmetric hash per port */
9950         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9951 }
9952
9953 /*
9954  * For X722 it is possible to have multiple pctypes mapped to the same
9955  * flow type; however, this function returns only the highest pctype
9956  * index, which is not quite correct. This is a known problem of the
9957  * i40e driver and needs to be fixed later.
9958  */
9959 enum i40e_filter_pctype
9960 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9961 {
9962         int i;
9963         uint64_t pctype_mask;
9964
9965         if (flow_type < I40E_FLOW_TYPE_MAX) {
9966                 pctype_mask = adapter->pctypes_tbl[flow_type];
9967                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9968                         if (pctype_mask & (1ULL << i))
9969                                 return (enum i40e_filter_pctype)i;
9970                 }
9971         }
9972         return I40E_FILTER_PCTYPE_INVALID;
9973 }
9974
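/* Map a hardware pctype back to the first flow type whose mask includes it */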
9975 uint16_t
9976 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9977                         enum i40e_filter_pctype pctype)
9978 {
9979         uint16_t flowtype;
9980         uint64_t pctype_mask = 1ULL << pctype;
9981
9982         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9983              flowtype++) {
9984                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9985                         return flowtype;
9986         }
9987
9988         return RTE_ETH_FLOW_UNKNOWN;
9989 }
9990
9991 /*
9992  * On X710, performance falls far short of expectations on recent firmware
9993  * versions. The same is true on XL710 when promiscuous mode is disabled,
9994  * or when promiscuous mode is enabled and the port MAC address equals the
9995  * packet destination MAC address. The fix for this issue may not be
9996  * integrated in the next firmware version, so a workaround in the software
9997  * driver is needed: it modifies the initial values of 3 internal-only
9998  * registers for both X710 and XL710. Note that the values for X710 and
9999  * XL710 could differ, and the workaround can be removed once it is fixed
10000  * in firmware.
10001  */
10002
10003 /* For both X710 and XL710 */
10004 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
10005 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
10006 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
10007
10008 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10009 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
10010
10011 /* For X722 */
10012 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10013 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10014
10015 /* For X710 */
10016 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
10017 /* For XL710 */
10018 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
10019 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10020
10021 /*
10022  * GL_SWR_PM_UP_THR:
10023  * The value is not affected by the link speed; it is set according to the
10024  * total number of ports for a better pipe-monitor configuration.
10025  */
10026 static bool
10027 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10028 {
10029 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10030                 .device_id = (dev),   \
10031                 .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10032
10033 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10034                 .device_id = (dev),   \
10035                 .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10036
10037         static const struct {
10038                 uint16_t device_id;
10039                 uint32_t val;
10040         } swr_pm_table[] = {
10041                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10042                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10043                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10044                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10045                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
10046
10047                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10048                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10049                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10050                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10051                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10052                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10053                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10054         };
10055         uint32_t i;
10056
10057         if (value == NULL) {
10058                 PMD_DRV_LOG(ERR, "value is NULL");
10059                 return false;
10060         }
10061
10062         for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10063                 if (hw->device_id == swr_pm_table[i].device_id) {
10064                         *value = swr_pm_table[i].val;
10065
10066                         PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10067                                     "value - 0x%08x",
10068                                     hw->device_id, *value);
10069                         return true;
10070                 }
10071         }
10072
10073         return false;
10074 }
10075
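/*
 * Query PHY capabilities to make sure the PHY type is in sync with the
 * firmware, retrying a few times with a 100 ms delay before giving up.
 */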
10076 static int
10077 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10078 {
10079         enum i40e_status_code status;
10080         struct i40e_aq_get_phy_abilities_resp phy_ab;
10081         int ret = -ENOTSUP;
10082         int retries = 0;
10083
10084         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10085                                               NULL);
10086
10087         while (status) {
10088                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10089                         status);
10090                 retries++;
10091                 rte_delay_us(100000);
10092                 if (retries < 5)
10093                         status = i40e_aq_get_phy_capabilities(hw, false,
10094                                         true, &phy_ab, NULL);
10095                 else
10096                         return ret;
10097         }
10098         return 0;
10099 }
10100
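/*
 * Apply the workaround described above: choose per-MAC values for the
 * GL_SWR_PRI_JOIN_MAP and GL_SWR_PM_UP_THR registers and write them via
 * the admin queue only when they differ from the current values.
 */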
10101 static void
10102 i40e_configure_registers(struct i40e_hw *hw)
10103 {
10104         static struct {
10105                 uint32_t addr;
10106                 uint64_t val;
10107         } reg_table[] = {
10108                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10109                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10110                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10111         };
10112         uint64_t reg;
10113         uint32_t i;
10114         int ret;
10115
10116         for (i = 0; i < RTE_DIM(reg_table); i++) {
10117                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10118                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10119                                 reg_table[i].val =
10120                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10121                         else /* For X710/XL710/XXV710 */
10122                                 if (hw->aq.fw_maj_ver < 6)
10123                                         reg_table[i].val =
10124                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10125                                 else
10126                                         reg_table[i].val =
10127                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10128                 }
10129
10130                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10131                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10132                                 reg_table[i].val =
10133                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10134                         else /* For X710/XL710/XXV710 */
10135                                 reg_table[i].val =
10136                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10137                 }
10138
10139                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10140                         uint32_t cfg_val;
10141
10142                         if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10143                                 PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10144                                             "GL_SWR_PM_UP_THR value fixup",
10145                                             hw->device_id);
10146                                 continue;
10147                         }
10148
10149                         reg_table[i].val = cfg_val;
10150                 }
10151
10152                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10153                                                         &reg, NULL);
10154                 if (ret < 0) {
10155                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10156                                                         reg_table[i].addr);
10157                         break;
10158                 }
10159                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10160                                                 reg_table[i].addr, reg);
10161                 if (reg == reg_table[i].val)
10162                         continue;
10163
10164                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10165                                                 reg_table[i].val, NULL);
10166                 if (ret < 0) {
10167                         PMD_DRV_LOG(ERR,
10168                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10169                                 reg_table[i].val, reg_table[i].addr);
10170                         break;
10171                 }
10172                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10173                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10174         }
10175 }
10176
10177 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10178 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10179 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
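/*
 * Enable QinQ on a VSI: turn on double VLAN stripping in the VSI_TSR
 * register and double VLAN insertion in VSI_L2TAGSTXVALID, writing through
 * the admin queue debug register interface.
 */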
10180 static int
10181 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10182 {
10183         uint32_t reg;
10184         int ret;
10185
10186         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10187                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10188                 return -EINVAL;
10189         }
10190
10191         /* Configure for double VLAN RX stripping */
10192         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10193         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10194                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10195                 ret = i40e_aq_debug_write_register(hw,
10196                                                    I40E_VSI_TSR(vsi->vsi_id),
10197                                                    reg, NULL);
10198                 if (ret < 0) {
10199                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10200                                     vsi->vsi_id);
10201                         return I40E_ERR_CONFIG;
10202                 }
10203         }
10204
10205         /* Configure for double VLAN TX insertion */
10206         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10207         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10208                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10209                 ret = i40e_aq_debug_write_register(hw,
10210                                                    I40E_VSI_L2TAGSTXVALID(
10211                                                    vsi->vsi_id), reg, NULL);
10212                 if (ret < 0) {
10213                         PMD_DRV_LOG(ERR,
10214                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
10215                                 vsi->vsi_id);
10216                         return I40E_ERR_CONFIG;
10217                 }
10218         }
10219
10220         return 0;
10221 }
10222
10223 /**
10224  * i40e_aq_add_mirror_rule
10225  * @hw: pointer to the hardware structure
10226  * @seid: VEB seid to add mirror rule to
10227  * @dst_id: destination vsi seid
10228  * @entries: Buffer which contains the entities to be mirrored
10229  * @count: number of entities contained in the buffer
10230  * @rule_id: the rule_id of the rule to be added
10231  *
10232  * Add a mirror rule for a given veb.
10233  *
10234  **/
10235 static enum i40e_status_code
10236 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10237                         uint16_t seid, uint16_t dst_id,
10238                         uint16_t rule_type, uint16_t *entries,
10239                         uint16_t count, uint16_t *rule_id)
10240 {
10241         struct i40e_aq_desc desc;
10242         struct i40e_aqc_add_delete_mirror_rule cmd;
10243         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10244                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
10245                 &desc.params.raw;
10246         uint16_t buff_len;
10247         enum i40e_status_code status;
10248
10249         i40e_fill_default_direct_cmd_desc(&desc,
10250                                           i40e_aqc_opc_add_mirror_rule);
10251         memset(&cmd, 0, sizeof(cmd));
10252
10253         buff_len = sizeof(uint16_t) * count;
10254         desc.datalen = rte_cpu_to_le_16(buff_len);
10255         if (buff_len > 0)
10256                 desc.flags |= rte_cpu_to_le_16(
10257                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10258         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10259                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10260         cmd.num_entries = rte_cpu_to_le_16(count);
10261         cmd.seid = rte_cpu_to_le_16(seid);
10262         cmd.destination = rte_cpu_to_le_16(dst_id);
10263
10264         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10265         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
10266         PMD_DRV_LOG(INFO,
10267                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
10268                 hw->aq.asq_last_status, resp->rule_id,
10269                 resp->mirror_rules_used, resp->mirror_rules_free);
10270         *rule_id = rte_le_to_cpu_16(resp->rule_id);
10271
10272         return status;
10273 }
10274
10275 /**
10276  * i40e_aq_del_mirror_rule
10277  * @hw: pointer to the hardware structure
10278  * @seid: VEB seid to delete the mirror rule from
10279  * @entries: Buffer which contains the entities to be mirrored
10280  * @count: number of entities contained in the buffer
10281  * @rule_id:the rule_id of the rule to be delete
10282  *
10283  * Delete a mirror rule for a given veb.
10284  *
10285  **/
10286 static enum i40e_status_code
10287 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10288                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
10289                 uint16_t count, uint16_t rule_id)
10290 {
10291         struct i40e_aq_desc desc;
10292         struct i40e_aqc_add_delete_mirror_rule cmd;
10293         uint16_t buff_len = 0;
10294         enum i40e_status_code status;
10295         void *buff = NULL;
10296
10297         i40e_fill_default_direct_cmd_desc(&desc,
10298                                           i40e_aqc_opc_delete_mirror_rule);
10299         memset(&cmd, 0, sizeof(cmd));
10300         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10301                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10302                                                           I40E_AQ_FLAG_RD));
10303                 cmd.num_entries = count;
10304                 buff_len = sizeof(uint16_t) * count;
10305                 desc.datalen = rte_cpu_to_le_16(buff_len);
10306                 buff = (void *)entries;
10307         } else
10308                 /* rule id is filled in destination field for deleting mirror rule */
10309                 cmd.destination = rte_cpu_to_le_16(rule_id);
10310
10311         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10312                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10313         cmd.seid = rte_cpu_to_le_16(seid);
10314
10315         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10316         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10317
10318         return status;
10319 }
10320
10321 /**
10322  * i40e_mirror_rule_set
10323  * @dev: pointer to the device
10324  * @mirror_conf: mirror rule info
10325  * @sw_id: mirror rule's sw_id
10326  * @on: enable/disable
10327  *
10328  * Set a mirror rule.
10329  *
10330  **/
10331 static int
10332 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10333                         struct rte_eth_mirror_conf *mirror_conf,
10334                         uint8_t sw_id, uint8_t on)
10335 {
10336         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10337         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10338         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10339         struct i40e_mirror_rule *parent = NULL;
10340         uint16_t seid, dst_seid, rule_id;
10341         uint16_t i, j = 0;
10342         int ret;
10343
10344         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10345
10346         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10347                 PMD_DRV_LOG(ERR,
10348                         "mirror rule cannot be configured without a VEB or VFs.");
10349                 return -ENOSYS;
10350         }
10351         if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
10352                 PMD_DRV_LOG(ERR, "mirror table is full.");
10353                 return -ENOSPC;
10354         }
10355         if (mirror_conf->dst_pool > pf->vf_num) {
10356                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10357                                  mirror_conf->dst_pool);
10358                 return -EINVAL;
10359         }
10360
10361         seid = pf->main_vsi->veb->seid;
10362
10363         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10364                 if (sw_id <= it->index) {
10365                         mirr_rule = it;
10366                         break;
10367                 }
10368                 parent = it;
10369         }
10370         if (mirr_rule && sw_id == mirr_rule->index) {
10371                 if (on) {
10372                         PMD_DRV_LOG(ERR, "mirror rule exists.");
10373                         return -EEXIST;
10374                 } else {
10375                         ret = i40e_aq_del_mirror_rule(hw, seid,
10376                                         mirr_rule->rule_type,
10377                                         mirr_rule->entries,
10378                                         mirr_rule->num_entries, mirr_rule->id);
10379                         if (ret < 0) {
10380                                 PMD_DRV_LOG(ERR,
10381                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
10382                                         ret, hw->aq.asq_last_status);
10383                                 return -ENOSYS;
10384                         }
10385                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10386                         rte_free(mirr_rule);
10387                         pf->nb_mirror_rule--;
10388                         return 0;
10389                 }
10390         } else if (!on) {
10391                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10392                 return -ENOENT;
10393         }
10394
10395         mirr_rule = rte_zmalloc("i40e_mirror_rule",
10396                                 sizeof(struct i40e_mirror_rule), 0);
10397         if (!mirr_rule) {
10398                 PMD_DRV_LOG(ERR, "failed to allocate memory");
10399                 return I40E_ERR_NO_MEMORY;
10400         }
10401         switch (mirror_conf->rule_type) {
10402         case ETH_MIRROR_VLAN:
10403                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10404                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10405                                 mirr_rule->entries[j] =
10406                                         mirror_conf->vlan.vlan_id[i];
10407                                 j++;
10408                         }
10409                 }
10410                 if (j == 0) {
10411                         PMD_DRV_LOG(ERR, "vlan is not specified.");
10412                         rte_free(mirr_rule);
10413                         return -EINVAL;
10414                 }
10415                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10416                 break;
10417         case ETH_MIRROR_VIRTUAL_POOL_UP:
10418         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10419                 /* check if the specified pool bit is out of range */
10420                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
10421                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
10422                         rte_free(mirr_rule);
10423                         return -EINVAL;
10424                 }
10425                 for (i = 0, j = 0; i < pf->vf_num; i++) {
10426                         if (mirror_conf->pool_mask & (1ULL << i)) {
10427                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10428                                 j++;
10429                         }
10430                 }
10431                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10432                         /* add pf vsi to entries */
10433                         mirr_rule->entries[j] = pf->main_vsi_seid;
10434                         j++;
10435                 }
10436                 if (j == 0) {
10437                         PMD_DRV_LOG(ERR, "pool is not specified.");
10438                         rte_free(mirr_rule);
10439                         return -EINVAL;
10440                 }
10441         /* egress and ingress in AQ commands mean from the switch, not the port */
10442                 mirr_rule->rule_type =
10443                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10444                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10445                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10446                 break;
10447         case ETH_MIRROR_UPLINK_PORT:
10448                 /* egress and ingress in AQ commands mean from the switch, not the port */
10449                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10450                 break;
10451         case ETH_MIRROR_DOWNLINK_PORT:
10452                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10453                 break;
10454         default:
10455                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10456                         mirror_conf->rule_type);
10457                 rte_free(mirr_rule);
10458                 return -EINVAL;
10459         }
10460
10461         /* If the dst_pool is equal to vf_num, consider it as PF */
10462         if (mirror_conf->dst_pool == pf->vf_num)
10463                 dst_seid = pf->main_vsi_seid;
10464         else
10465                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10466
10467         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10468                                       mirr_rule->rule_type, mirr_rule->entries,
10469                                       j, &rule_id);
10470         if (ret < 0) {
10471                 PMD_DRV_LOG(ERR,
10472                         "failed to add mirror rule: ret = %d, aq_err = %d.",
10473                         ret, hw->aq.asq_last_status);
10474                 rte_free(mirr_rule);
10475                 return -ENOSYS;
10476         }
10477
10478         mirr_rule->index = sw_id;
10479         mirr_rule->num_entries = j;
10480         mirr_rule->id = rule_id;
10481         mirr_rule->dst_vsi_seid = dst_seid;
10482
10483         if (parent)
10484                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10485         else
10486                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10487
10488         pf->nb_mirror_rule++;
10489         return 0;
10490 }
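/*
 * Application-side sketch of the legacy ethdev mirror API that this
 * callback implements (port_id and the VLAN value are assumptions):
 *
 *	struct rte_eth_mirror_conf conf = {
 *		.rule_type = ETH_MIRROR_VLAN,
 *		.dst_pool = 0,
 *		.vlan = {
 *			.vlan_mask = 1ULL << 0,
 *			.vlan_id = { 100 },
 *		},
 *	};
 *
 *	rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
 *
 * The last two arguments are sw_id = 0 and on = 1; calling again with
 * on = 0 and the same sw_id removes the rule.
 */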
10491
10492 /**
10493  * i40e_mirror_rule_reset
10494  * @dev: pointer to the device
10495  * @sw_id: mirror rule's sw_id
10496  *
10497  * Reset a mirror rule.
10498  *
10499  **/
10500 static int
10501 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10502 {
10503         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10504         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10505         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10506         uint16_t seid;
10507         int ret;
10508
10509         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10510
10511         seid = pf->main_vsi->veb->seid;
10512
10513         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10514                 if (sw_id == it->index) {
10515                         mirr_rule = it;
10516                         break;
10517                 }
10518         }
10519         if (mirr_rule) {
10520                 ret = i40e_aq_del_mirror_rule(hw, seid,
10521                                 mirr_rule->rule_type,
10522                                 mirr_rule->entries,
10523                                 mirr_rule->num_entries, mirr_rule->id);
10524                 if (ret < 0) {
10525                         PMD_DRV_LOG(ERR,
10526                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
10527                                 ret, hw->aq.asq_last_status);
10528                         return -ENOSYS;
10529                 }
10530                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10531                 rte_free(mirr_rule);
10532                 pf->nb_mirror_rule--;
10533         } else {
10534                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10535                 return -ENOENT;
10536         }
10537         return 0;
10538 }
10539
10540 static uint64_t
10541 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10542 {
10543         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10544         uint64_t systim_cycles;
10545
10546         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10547         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10548                         << 32;
10549
10550         return systim_cycles;
10551 }
10552
10553 static uint64_t
10554 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10555 {
10556         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10557         uint64_t rx_tstamp;
10558
10559         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10560         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10561                         << 32;
10562
10563         return rx_tstamp;
10564 }
10565
10566 static uint64_t
10567 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10568 {
10569         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10570         uint64_t tx_tstamp;
10571
10572         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10573         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10574                         << 32;
10575
10576         return tx_tstamp;
10577 }
10578
10579 static void
10580 i40e_start_timecounters(struct rte_eth_dev *dev)
10581 {
10582         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10583         struct i40e_adapter *adapter = dev->data->dev_private;
10584         struct rte_eth_link link;
10585         uint32_t tsync_inc_l;
10586         uint32_t tsync_inc_h;
10587
10588         /* Get current link speed. */
10589         i40e_dev_link_update(dev, 1);
10590         rte_eth_linkstatus_get(dev, &link);
10591
10592         switch (link.link_speed) {
10593         case ETH_SPEED_NUM_40G:
10594         case ETH_SPEED_NUM_25G:
10595                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10596                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10597                 break;
10598         case ETH_SPEED_NUM_10G:
10599                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10600                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10601                 break;
10602         case ETH_SPEED_NUM_1G:
10603                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10604                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10605                 break;
10606         default:
10607                 tsync_inc_l = 0x0;
10608                 tsync_inc_h = 0x0;
10609         }
10610
10611         /* Set the timesync increment value. */
10612         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10613         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10614
10615         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10616         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10617         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10618
10619         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10620         adapter->systime_tc.cc_shift = 0;
10621         adapter->systime_tc.nsec_mask = 0;
10622
10623         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10624         adapter->rx_tstamp_tc.cc_shift = 0;
10625         adapter->rx_tstamp_tc.nsec_mask = 0;
10626
10627         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10628         adapter->tx_tstamp_tc.cc_shift = 0;
10629         adapter->tx_tstamp_tc.nsec_mask = 0;
10630 }
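/*
 * The PRTTSYN_INC_L/H pair sets how much the 64-bit PHC advances per
 * clock cycle; the speed-dependent I40E_PTP_*_INCVAL constants are chosen
 * so that the counter effectively ticks in nanoseconds at each link
 * speed. With cc_shift = 0 and a full 64-bit cc_mask, the timecounters
 * initialized above therefore treat cycles and nanoseconds as 1:1.
 */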
10631
10632 static int
10633 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10634 {
10635         struct i40e_adapter *adapter = dev->data->dev_private;
10636
10637         adapter->systime_tc.nsec += delta;
10638         adapter->rx_tstamp_tc.nsec += delta;
10639         adapter->tx_tstamp_tc.nsec += delta;
10640
10641         return 0;
10642 }
10643
10644 static int
10645 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10646 {
10647         uint64_t ns;
10648         struct i40e_adapter *adapter = dev->data->dev_private;
10649
10650         ns = rte_timespec_to_ns(ts);
10651
10652         /* Set the timecounters to a new value. */
10653         adapter->systime_tc.nsec = ns;
10654         adapter->rx_tstamp_tc.nsec = ns;
10655         adapter->tx_tstamp_tc.nsec = ns;
10656
10657         return 0;
10658 }
10659
10660 static int
10661 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10662 {
10663         uint64_t ns, systime_cycles;
10664         struct i40e_adapter *adapter = dev->data->dev_private;
10665
10666         systime_cycles = i40e_read_systime_cyclecounter(dev);
10667         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10668         *ts = rte_ns_to_timespec(ns);
10669
10670         return 0;
10671 }
10672
10673 static int
10674 i40e_timesync_enable(struct rte_eth_dev *dev)
10675 {
10676         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10677         uint32_t tsync_ctl_l;
10678         uint32_t tsync_ctl_h;
10679
10680         /* Stop the timesync system time. */
10681         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10682         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10683         /* Reset the timesync system time value. */
10684         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10685         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10686
10687         i40e_start_timecounters(dev);
10688
10689         /* Clear timesync registers. */
10690         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10691         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10692         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10693         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10694         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10695         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10696
10697         /* Enable timestamping of PTP packets. */
10698         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10699         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10700
10701         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10702         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10703         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10704
10705         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10706         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10707
10708         return 0;
10709 }
10710
10711 static int
10712 i40e_timesync_disable(struct rte_eth_dev *dev)
10713 {
10714         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10715         uint32_t tsync_ctl_l;
10716         uint32_t tsync_ctl_h;
10717
10718         /* Disable timestamping of transmitted PTP packets. */
10719         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10720         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10721
10722         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10723         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10724
10725         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10726         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10727
10728         /* Reset the timesync increment value. */
10729         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10730         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10731
10732         return 0;
10733 }
10734
10735 static int
10736 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10737                                 struct timespec *timestamp, uint32_t flags)
10738 {
10739         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10740         struct i40e_adapter *adapter = dev->data->dev_private;
10741         uint32_t sync_status;
10742         uint32_t index = flags & 0x03;
10743         uint64_t rx_tstamp_cycles;
10744         uint64_t ns;
10745
10746         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10747         if ((sync_status & (1 << index)) == 0)
10748                 return -EINVAL;
10749
10750         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10751         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10752         *timestamp = rte_ns_to_timespec(ns);
10753
10754         return 0;
10755 }
10756
10757 static int
10758 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10759                                 struct timespec *timestamp)
10760 {
10761         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10762         struct i40e_adapter *adapter = dev->data->dev_private;
10763         uint32_t sync_status;
10764         uint64_t tx_tstamp_cycles;
10765         uint64_t ns;
10766
10767         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10768         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10769                 return -EINVAL;
10770
10771         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10772         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10773         *timestamp = rte_ns_to_timespec(ns);
10774
10775         return 0;
10776 }
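/*
 * Typical application flow for the timesync callbacks above (a sketch
 * using the generic ethdev API; delta_ns is a placeholder):
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	 ... transmit a PTP frame with the IEEE1588 Tx timestamp flag ...
 *	if (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == 0)
 *		 ... ts holds the transmit wire timestamp ...
 *	rte_eth_timesync_adjust_time(port_id, delta_ns);
 */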
10777
10778 /*
10779  * i40e_parse_dcb_configure - parse dcb configure from user
10780  * @dev: the device being configured
10781  * @dcb_cfg: pointer of the result of parse
10782  * @tc_map: output bit map of enabled traffic classes
10783  *
10784  * Returns 0 on success, negative value on failure
10785  */
10786 static int
10787 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10788                          struct i40e_dcbx_config *dcb_cfg,
10789                          uint8_t *tc_map)
10790 {
10791         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10792         uint8_t i, tc_bw, bw_lf;
10793
10794         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10795
10796         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10797         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10798                 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
10799                 return -EINVAL;
10800         }
10801
10802         /* assume each tc has the same bw */
10803         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10804         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10805                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10806         /* to ensure the sum of tcbw is equal to 100 */
10807         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10808         for (i = 0; i < bw_lf; i++)
10809                 dcb_cfg->etscfg.tcbwtable[i]++;
10810
10811         /* assume each tc has the same Transmission Selection Algorithm */
10812         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10813                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10814
10815         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10816                 dcb_cfg->etscfg.prioritytable[i] =
10817                                 dcb_rx_conf->dcb_tc[i];
10818
10819         /* FW needs one App to configure HW */
10820         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10821         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10822         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10823         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10824
10825         if (dcb_rx_conf->nb_tcs == 0)
10826                 *tc_map = 1; /* tc0 only */
10827         else
10828                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10829
10830         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10831                 dcb_cfg->pfc.willing = 0;
10832                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10833                 dcb_cfg->pfc.pfcenable = *tc_map;
10834         }
10835         return 0;
10836 }
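/*
 * Worked example for the bandwidth split above: with nb_tcs = 8,
 * tc_bw = 100 / 8 = 12 and bw_lf = 100 % 8 = 4, so TC0..TC3 each get 13%
 * and TC4..TC7 get 12%, summing to exactly 100; tc_map becomes 0xFF.
 */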
10837
10838
10839 static enum i40e_status_code
10840 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10841                               struct i40e_aqc_vsi_properties_data *info,
10842                               uint8_t enabled_tcmap)
10843 {
10844         enum i40e_status_code ret;
10845         int i, total_tc = 0;
10846         uint16_t qpnum_per_tc, bsf, qp_idx;
10847         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10848         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10849         uint16_t used_queues;
10850
10851         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10852         if (ret != I40E_SUCCESS)
10853                 return ret;
10854
10855         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10856                 if (enabled_tcmap & (1 << i))
10857                         total_tc++;
10858         }
10859         if (total_tc == 0)
10860                 total_tc = 1;
10861         vsi->enabled_tc = enabled_tcmap;
10862
10863         /* different VSI has different queues assigned */
10864         if (vsi->type == I40E_VSI_MAIN)
10865                 used_queues = dev_data->nb_rx_queues -
10866                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10867         else if (vsi->type == I40E_VSI_VMDQ2)
10868                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10869         else {
10870                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10871                 return I40E_ERR_NO_AVAILABLE_VSI;
10872         }
10873
10874         /* Number of queues per enabled TC */
10875         qpnum_per_tc = used_queues / total_tc;
10876         if (qpnum_per_tc == 0) {
10877                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
10878                 return I40E_ERR_INVALID_QP_ID;
10879         }
10880         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10881                                 I40E_MAX_Q_PER_TC);
10882         bsf = rte_bsf32(qpnum_per_tc);
10883
10884         /**
10885          * Configure TC and queue mapping parameters. For each enabled TC,
10886          * allocate qpnum_per_tc queues to that traffic class; a disabled
10887          * TC is served by the default queue.
10888          */
10889         qp_idx = 0;
10890         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10891                 if (vsi->enabled_tc & (1 << i)) {
10892                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10893                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10894                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10895                         qp_idx += qpnum_per_tc;
10896                 } else
10897                         info->tc_mapping[i] = 0;
10898         }
10899
10900         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
10901         if (vsi->type == I40E_VSI_SRIOV) {
10902                 info->mapping_flags |=
10903                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10904                 for (i = 0; i < vsi->nb_qps; i++)
10905                         info->queue_mapping[i] =
10906                                 rte_cpu_to_le_16(vsi->base_queue + i);
10907         } else {
10908                 info->mapping_flags |=
10909                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10910                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10911         }
10912         info->valid_sections |=
10913                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10914
10915         return I40E_SUCCESS;
10916 }
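/*
 * Worked example for the mapping above: a main VSI with 16 usable queues
 * and 4 enabled TCs yields qpnum_per_tc = 4 and bsf = 2, so the
 * tc_mapping word of the second enabled TC encodes queue offset 4 in the
 * QUE_OFFSET field and 2 (i.e. 2^2 = 4 queues) in the QUE_NUMBER field.
 */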
10917
10918 /*
10919  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10920  * @veb: VEB to be configured
10921  * @tc_map: enabled TC bitmap
10922  *
10923  * Returns 0 on success, negative value on failure
10924  */
10925 static enum i40e_status_code
10926 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10927 {
10928         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10929         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10930         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10931         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10932         enum i40e_status_code ret = I40E_SUCCESS;
10933         int i;
10934         uint32_t bw_max;
10935
10936         /* Check if enabled_tc is same as existing or new TCs */
10937         if (veb->enabled_tc == tc_map)
10938                 return ret;
10939
10940         /* configure tc bandwidth */
10941         memset(&veb_bw, 0, sizeof(veb_bw));
10942         veb_bw.tc_valid_bits = tc_map;
10943         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10944         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10945                 if (tc_map & BIT_ULL(i))
10946                         veb_bw.tc_bw_share_credits[i] = 1;
10947         }
10948         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10949                                                    &veb_bw, NULL);
10950         if (ret) {
10951                 PMD_INIT_LOG(ERR,
10952                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10953                         hw->aq.asq_last_status);
10954                 return ret;
10955         }
10956
10957         memset(&ets_query, 0, sizeof(ets_query));
10958         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10959                                                    &ets_query, NULL);
10960         if (ret != I40E_SUCCESS) {
10961                 PMD_DRV_LOG(ERR,
10962                         "Failed to get switch_comp ETS configuration %u",
10963                         hw->aq.asq_last_status);
10964                 return ret;
10965         }
10966         memset(&bw_query, 0, sizeof(bw_query));
10967         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10968                                                   &bw_query, NULL);
10969         if (ret != I40E_SUCCESS) {
10970                 PMD_DRV_LOG(ERR,
10971                         "Failed to get switch_comp bandwidth configuration %u",
10972                         hw->aq.asq_last_status);
10973                 return ret;
10974         }
10975
10976         /* store and print out BW info */
10977         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10978         veb->bw_info.bw_max = ets_query.tc_bw_max;
10979         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10980         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10981         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10982                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10983                      I40E_16_BIT_WIDTH);
10984         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10985                 veb->bw_info.bw_ets_share_credits[i] =
10986                                 bw_query.tc_bw_share_credits[i];
10987                 veb->bw_info.bw_ets_credits[i] =
10988                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10989                 /* 4 bits per TC, 4th bit is reserved */
10990                 veb->bw_info.bw_ets_max[i] =
10991                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10992                                   RTE_LEN2MASK(3, uint8_t));
10993                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10994                             veb->bw_info.bw_ets_share_credits[i]);
10995                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10996                             veb->bw_info.bw_ets_credits[i]);
10997                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10998                             veb->bw_info.bw_ets_max[i]);
10999         }
11000
11001         veb->enabled_tc = tc_map;
11002
11003         return ret;
11004 }
11005
11006
11007 /*
11008  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
11009  * @vsi: VSI to be configured
11010  * @tc_map: enabled TC bitmap
11011  *
11012  * Returns 0 on success, negative value on failure
11013  */
11014 static enum i40e_status_code
11015 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
11016 {
11017         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
11018         struct i40e_vsi_context ctxt;
11019         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
11020         enum i40e_status_code ret = I40E_SUCCESS;
11021         int i;
11022
11023         /* Check if enabled_tc is same as existing or new TCs */
11024         if (vsi->enabled_tc == tc_map)
11025                 return ret;
11026
11027         /* configure tc bandwidth */
11028         memset(&bw_data, 0, sizeof(bw_data));
11029         bw_data.tc_valid_bits = tc_map;
11030         /* Enable ETS TCs with equal BW Share for now across all VSIs */
11031         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11032                 if (tc_map & BIT_ULL(i))
11033                         bw_data.tc_bw_credits[i] = 1;
11034         }
11035         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
11036         if (ret) {
11037                 PMD_INIT_LOG(ERR,
11038                         "AQ command Config VSI BW allocation per TC failed = %d",
11039                         hw->aq.asq_last_status);
11040                 goto out;
11041         }
11042         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
11043                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
11044
11045         /* Update Queue Pairs Mapping for currently enabled UPs */
11046         ctxt.seid = vsi->seid;
11047         ctxt.pf_num = hw->pf_id;
11048         ctxt.vf_num = 0;
11049         ctxt.uplink_seid = vsi->uplink_seid;
11050         ctxt.info = vsi->info;
11051         i40e_get_cap(hw);
11052         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
11053         if (ret)
11054                 goto out;
11055
11056         /* Update the VSI after updating the VSI queue-mapping information */
11057         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11058         if (ret) {
11059                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
11060                         hw->aq.asq_last_status);
11061                 goto out;
11062         }
11063         /* update the local VSI info with updated queue map */
11064         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
11065                                         sizeof(vsi->info.tc_mapping));
11066         rte_memcpy(&vsi->info.queue_mapping,
11067                         &ctxt.info.queue_mapping,
11068                 sizeof(vsi->info.queue_mapping));
11069         vsi->info.mapping_flags = ctxt.info.mapping_flags;
11070         vsi->info.valid_sections = 0;
11071
11072         /* query and update current VSI BW information */
11073         ret = i40e_vsi_get_bw_config(vsi);
11074         if (ret) {
11075                 PMD_INIT_LOG(ERR,
11076                          "Failed updating vsi bw info, err %s aq_err %s",
11077                          i40e_stat_str(hw, ret),
11078                          i40e_aq_str(hw, hw->aq.asq_last_status));
11079                 goto out;
11080         }
11081
11082         vsi->enabled_tc = tc_map;
11083
11084 out:
11085         return ret;
11086 }
11087
11088 /*
11089  * i40e_dcb_hw_configure - program the dcb setting to hw
11090  * @pf: pf the configuration is taken on
11091  * @new_cfg: new configuration
11092  * @tc_map: enabled TC bitmap
11093  *
11094  * Returns 0 on success, negative value on failure
11095  */
11096 static enum i40e_status_code
11097 i40e_dcb_hw_configure(struct i40e_pf *pf,
11098                       struct i40e_dcbx_config *new_cfg,
11099                       uint8_t tc_map)
11100 {
11101         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11102         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11103         struct i40e_vsi *main_vsi = pf->main_vsi;
11104         struct i40e_vsi_list *vsi_list;
11105         enum i40e_status_code ret;
11106         int i;
11107         uint32_t val;
11108
11109         /* Use the FW API if FW >= v4.4 */
11110         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11111               (hw->aq.fw_maj_ver >= 5))) {
11112                 PMD_INIT_LOG(ERR,
11113                         "FW < v4.4, cannot use FW LLDP API to configure DCB");
11114                 return I40E_ERR_FIRMWARE_API_VERSION;
11115         }
11116
11117         /* Check whether reconfiguration is needed */
11118         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11119                 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
11120                 return I40E_SUCCESS;
11121         }
11122
11123         /* Copy the new config to the current config */
11124         *old_cfg = *new_cfg;
11125         old_cfg->etsrec = old_cfg->etscfg;
11126         ret = i40e_set_dcb_config(hw);
11127         if (ret) {
11128                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11129                          i40e_stat_str(hw, ret),
11130                          i40e_aq_str(hw, hw->aq.asq_last_status));
11131                 return ret;
11132         }
11133         /* set receive Arbiter to RR mode and ETS scheme by default */
11134         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11135                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11136                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11137                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11138                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
11139                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11140                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11141                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11142                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11143                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11144                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11145                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11146                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11147         }
11148         /* get local mib to check whether it is configured correctly */
11149         /* IEEE mode */
11150         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11151         /* Get Local DCB Config */
11152         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11153                                      &hw->local_dcbx_config);
11154
11155         /* If a VEB has been created, its TC settings must be updated first */
11156         if (main_vsi->veb) {
11157                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11158                 if (ret)
11159                         PMD_INIT_LOG(WARNING,
11160                                  "Failed configuring TC for VEB seid=%d",
11161                                  main_vsi->veb->seid);
11162         }
11163         /* Update each VSI */
11164         i40e_vsi_config_tc(main_vsi, tc_map);
11165         if (main_vsi->veb) {
11166                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11167                         /* Besides the main VSI, only VMDq VSIs get the
11168                          * full TC map; all other VSIs use the default TC.
11169                          */
11170                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11171                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11172                                                          tc_map);
11173                         else
11174                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11175                                                          I40E_DEFAULT_TCMAP);
11176                         if (ret)
11177                                 PMD_INIT_LOG(WARNING,
11178                                         "Failed configuring TC for VSI seid=%d",
11179                                         vsi_list->vsi->seid);
11180                         /* continue */
11181                 }
11182         }
11183         return I40E_SUCCESS;
11184 }
11185
11186 /*
11187  * i40e_dcb_init_configure - initial dcb config
11188  * @dev: device being configured
11189  * @sw_dcb: indicate whether dcb is sw configured or hw offload
11190  *
11191  * Returns 0 on success, negative value on failure
11192  */
11193 int
11194 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11195 {
11196         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11197         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11198         int i, ret = 0;
11199
11200         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11201                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11202                 return -ENOTSUP;
11203         }
11204
11205         /* DCB initialization:
11206          * Update DCB configuration from the Firmware and configure
11207          * LLDP MIB change event.
11208          */
11209         if (sw_dcb == TRUE) {
11210                 /* Stopping LLDP is necessary for DPDK, but it causes
11211                  * DCB init to fail. For i40e_init_dcb(), the prerequisite
11212                  * for successful DCB initialization is that LLDP is
11213                  * enabled, so LLDP has to be started before DCB init
11214                  * and stopped again after initialization.
11215                  */
11216                 ret = i40e_aq_start_lldp(hw, true, NULL);
11217                 if (ret != I40E_SUCCESS)
11218                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11219
11220                 ret = i40e_init_dcb(hw, true);
11221                 /* If the LLDP agent is stopped, i40e_init_dcb() is
11222                  * expected to fail with adminq status I40E_AQ_RC_EPERM;
11223                  * otherwise it should return success.
11224                  */
11225                 if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
11226                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
11227                         memset(&hw->local_dcbx_config, 0,
11228                                 sizeof(struct i40e_dcbx_config));
11229                         /* set dcb default configuration */
11230                         hw->local_dcbx_config.etscfg.willing = 0;
11231                         hw->local_dcbx_config.etscfg.maxtcs = 0;
11232                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11233                         hw->local_dcbx_config.etscfg.tsatable[0] =
11234                                                 I40E_IEEE_TSA_ETS;
11235                         /* all UPs mapping to TC0 */
11236                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11237                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11238                         hw->local_dcbx_config.etsrec =
11239                                 hw->local_dcbx_config.etscfg;
11240                         hw->local_dcbx_config.pfc.willing = 0;
11241                         hw->local_dcbx_config.pfc.pfccap =
11242                                                 I40E_MAX_TRAFFIC_CLASS;
11243                         /* FW needs one App to configure HW */
11244                         hw->local_dcbx_config.numapps = 1;
11245                         hw->local_dcbx_config.app[0].selector =
11246                                                 I40E_APP_SEL_ETHTYPE;
11247                         hw->local_dcbx_config.app[0].priority = 3;
11248                         hw->local_dcbx_config.app[0].protocolid =
11249                                                 I40E_APP_PROTOID_FCOE;
11250                         ret = i40e_set_dcb_config(hw);
11251                         if (ret) {
11252                                 PMD_INIT_LOG(ERR,
11253                                         "default DCB config failed, err = %d, aq_err = %d.",
11254                                         ret, hw->aq.asq_last_status);
11255                                 return -ENOSYS;
11256                         }
11257                 } else {
11258                         PMD_INIT_LOG(ERR,
11259                                 "DCB initialization in FW failed, err = %d, aq_err = %d.",
11260                                 ret, hw->aq.asq_last_status);
11261                         return -ENOTSUP;
11262                 }
11263
11264                 if (i40e_need_stop_lldp(dev)) {
11265                         ret = i40e_aq_stop_lldp(hw, true, true, NULL);
11266                         if (ret != I40E_SUCCESS)
11267                                 PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
11268                 }
11269         } else {
11270                 ret = i40e_aq_start_lldp(hw, true, NULL);
11271                 if (ret != I40E_SUCCESS)
11272                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11273
11274                 ret = i40e_init_dcb(hw, true);
11275                 if (!ret) {
11276                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11277                                 PMD_INIT_LOG(ERR,
11278                                         "HW doesn't support DCBX offload.");
11279                                 return -ENOTSUP;
11280                         }
11281                 } else {
11282                         PMD_INIT_LOG(ERR,
11283                                 "DCBX configuration failed, err = %d, aq_err = %d.",
11284                                 ret, hw->aq.asq_last_status);
11285                         return -ENOTSUP;
11286                 }
11287         }
11288         return 0;
11289 }
11290
11291 /*
11292  * i40e_dcb_setup - setup dcb related config
11293  * @dev: device being configured
11294  *
11295  * Returns 0 on success, negative value on failure
11296  */
11297 static int
11298 i40e_dcb_setup(struct rte_eth_dev *dev)
11299 {
11300         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11301         struct i40e_dcbx_config dcb_cfg;
11302         uint8_t tc_map = 0;
11303         int ret = 0;
11304
11305         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11306                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11307                 return -ENOTSUP;
11308         }
11309
11310         if (pf->vf_num != 0)
11311                 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDq VSIs.");
11312
11313         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11314         if (ret) {
11315                 PMD_INIT_LOG(ERR, "invalid dcb config");
11316                 return -EINVAL;
11317         }
11318         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11319         if (ret) {
11320                 PMD_INIT_LOG(ERR, "DCB sw configuration failed");
11321                 return -ENOSYS;
11322         }
11323
11324         return 0;
11325 }
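/*
 * Application-side sketch for reaching this path (standard ethdev DCB
 * configuration; the port and queue counts are placeholders):
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
 *	conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
 *	 ... fill dcb_rx_conf.dcb_tc[] with the UP-to-TC mapping ...
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * i40e_dcb_setup() is then typically reached from the device start path.
 */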
11326
11327 static int
11328 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11329                       struct rte_eth_dcb_info *dcb_info)
11330 {
11331         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11332         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11333         struct i40e_vsi *vsi = pf->main_vsi;
11334         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11335         uint16_t bsf, tc_mapping;
11336         int i, j = 0;
11337
11338         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11339                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11340         else
11341                 dcb_info->nb_tcs = 1;
11342         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11343                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11344         for (i = 0; i < dcb_info->nb_tcs; i++)
11345                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11346
11347         /* get queue mapping if vmdq is disabled */
11348         if (!pf->nb_cfg_vmdq_vsi) {
11349                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11350                         if (!(vsi->enabled_tc & (1 << i)))
11351                                 continue;
11352                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11353                         dcb_info->tc_queue.tc_rxq[j][i].base =
11354                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11355                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11356                         dcb_info->tc_queue.tc_txq[j][i].base =
11357                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11358                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11359                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11360                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11361                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11362                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11363                 }
11364                 return 0;
11365         }
11366
11367         /* get queue mapping if vmdq is enabled */
11368         do {
11369                 vsi = pf->vmdq[j].vsi;
11370                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11371                         if (!(vsi->enabled_tc & (1 << i)))
11372                                 continue;
11373                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11374                         dcb_info->tc_queue.tc_rxq[j][i].base =
11375                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11376                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11377                         dcb_info->tc_queue.tc_txq[j][i].base =
11378                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11379                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11380                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11381                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11382                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11383                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11384                 }
11385                 j++;
11386         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11387         return 0;
11388 }
11389
11390 static int
11391 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11392 {
11393         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11394         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11395         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11396         uint16_t msix_intr;
11397
11398         msix_intr = intr_handle->intr_vec[queue_id];
11399         if (msix_intr == I40E_MISC_VEC_ID)
11400                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11401                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
11402                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11403                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11404         else
11405                 I40E_WRITE_REG(hw,
11406                                I40E_PFINT_DYN_CTLN(msix_intr -
11407                                                    I40E_RX_VEC_START),
11408                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
11409                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11410                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11411
11412         I40E_WRITE_FLUSH(hw);
11413         rte_intr_ack(&pci_dev->intr_handle);
11414
11415         return 0;
11416 }
11417
11418 static int
11419 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11420 {
11421         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11422         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11423         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11424         uint16_t msix_intr;
11425
11426         msix_intr = intr_handle->intr_vec[queue_id];
11427         if (msix_intr == I40E_MISC_VEC_ID)
11428                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11429                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11430         else
11431                 I40E_WRITE_REG(hw,
11432                                I40E_PFINT_DYN_CTLN(msix_intr -
11433                                                    I40E_RX_VEC_START),
11434                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11435         I40E_WRITE_FLUSH(hw);
11436
11437         return 0;
11438 }
11439
11440 /**
11441  * Check whether a register offset is valid to read.
11442  * The following register ranges are valid for X722 only:
11443  * 0x2b800--0x2bb00
11444  * 0x38700--0x38a00
11445  * 0x3d800--0x3db00
11446  * 0x208e00--0x209000
11447  * 0x20be00--0x20c000
11448  * 0x263c00--0x264000
11449  * 0x265c00--0x266000
11450  */
11451 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
11452 {
11453         if ((type != I40E_MAC_X722) &&
11454             ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
11455              (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
11456              (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
11457              (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
11458              (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
11459              (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
11460              (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
11461                 return 0;
11462         else
11463                 return 1;
11464 }
11465
11466 static int i40e_get_regs(struct rte_eth_dev *dev,
11467                          struct rte_dev_reg_info *regs)
11468 {
11469         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11470         uint32_t *ptr_data = regs->data;
11471         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11472         const struct i40e_reg_info *reg_info;
11473
11474         if (ptr_data == NULL) {
11475                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11476                 regs->width = sizeof(uint32_t);
11477                 return 0;
11478         }
11479
11480         /* The first few registers have to be read using AQ operations */
11481         reg_idx = 0;
11482         while (i40e_regs_adminq[reg_idx].name) {
11483                 reg_info = &i40e_regs_adminq[reg_idx++];
11484                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11485                         for (arr_idx2 = 0;
11486                                         arr_idx2 <= reg_info->count2;
11487                                         arr_idx2++) {
11488                                 reg_offset = arr_idx * reg_info->stride1 +
11489                                         arr_idx2 * reg_info->stride2;
11490                                 reg_offset += reg_info->base_addr;
11491                                 ptr_data[reg_offset >> 2] =
11492                                         i40e_read_rx_ctl(hw, reg_offset);
11493                         }
11494         }
11495
11496         /* The remaining registers can be read using primitives */
11497         reg_idx = 0;
11498         while (i40e_regs_others[reg_idx].name) {
11499                 reg_info = &i40e_regs_others[reg_idx++];
11500                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11501                         for (arr_idx2 = 0;
11502                                         arr_idx2 <= reg_info->count2;
11503                                         arr_idx2++) {
11504                                 reg_offset = arr_idx * reg_info->stride1 +
11505                                         arr_idx2 * reg_info->stride2;
11506                                 reg_offset += reg_info->base_addr;
11507                                 if (!i40e_valid_regs(hw->mac.type, reg_offset))
11508                                         ptr_data[reg_offset >> 2] = 0;
11509                                 else
11510                                         ptr_data[reg_offset >> 2] =
11511                                                 I40E_READ_REG(hw, reg_offset);
11512                         }
11513         }
11514
11515         return 0;
11516 }
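/*
 * Usage sketch via the generic rte_eth_dev_get_reg_info(): a first call
 * with data == NULL only reports the snapshot size, a second call fills
 * the buffer (error checking omitted):
 *
 *	struct rte_dev_reg_info info = { 0 };
 *
 *	rte_eth_dev_get_reg_info(port_id, &info);
 *	info.data = malloc(info.length * info.width);
 *	rte_eth_dev_get_reg_info(port_id, &info);
 */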
11517
11518 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11519 {
11520         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11521
11522         /* Convert word count to byte count */
11523         return hw->nvm.sr_size << 1;
11524 }
11525
11526 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11527                            struct rte_dev_eeprom_info *eeprom)
11528 {
11529         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11530         uint16_t *data = eeprom->data;
11531         uint16_t offset, length, cnt_words;
11532         int ret_code;
11533
11534         offset = eeprom->offset >> 1;
11535         length = eeprom->length >> 1;
11536         cnt_words = length;
11537
11538         if (offset > hw->nvm.sr_size ||
11539                 offset + length > hw->nvm.sr_size) {
11540                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11541                 return -EINVAL;
11542         }
11543
11544         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11545
11546         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11547         if (ret_code != I40E_SUCCESS || cnt_words != length) {
11548                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
11549                 return -EIO;
11550         }
11551
11552         return 0;
11553 }
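
/*
 * Usage sketch (hypothetical application code): offset and length are in
 * bytes at the API level and are halved above into NVM words, so callers
 * should pass even values:
 *
 *	struct rte_dev_eeprom_info info;
 *	uint16_t buf[64];
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *	memset(&info, 0, sizeof(info));
 *	info.offset = 0;
 *	info.length = RTE_MIN(len, (int)sizeof(buf));
 *	info.data = buf;
 *	if (rte_eth_dev_get_eeprom(port_id, &info) == 0)
 *		dump_words(buf, info.length / 2);
 *
 * dump_words() is a placeholder for application-side processing.
 */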
11554
11555 static int i40e_get_module_info(struct rte_eth_dev *dev,
11556                                 struct rte_eth_dev_module_info *modinfo)
11557 {
11558         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11559         uint32_t sff8472_comp = 0;
11560         uint32_t sff8472_swap = 0;
11561         uint32_t sff8636_rev = 0;
11562         i40e_status status;
11563         uint32_t type = 0;
11564
11565         /* Check if firmware supports reading module EEPROM. */
11566         if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
                PMD_DRV_LOG(ERR,
                            "Module EEPROM memory read not supported. "
                            "Please update the NVM image.");
11570                 return -EINVAL;
11571         }
11572
11573         status = i40e_update_link_info(hw);
11574         if (status)
11575                 return -EIO;
11576
11577         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
                PMD_DRV_LOG(ERR,
                            "Cannot read module EEPROM memory. "
                            "No module connected.");
11581                 return -EINVAL;
11582         }
11583
11584         type = hw->phy.link_info.module_type[0];
11585
11586         switch (type) {
11587         case I40E_MODULE_TYPE_SFP:
11588                 status = i40e_aq_get_phy_register(hw,
11589                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11590                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
11591                                 I40E_MODULE_SFF_8472_COMP,
11592                                 &sff8472_comp, NULL);
11593                 if (status)
11594                         return -EIO;
11595
11596                 status = i40e_aq_get_phy_register(hw,
11597                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11598                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
11599                                 I40E_MODULE_SFF_8472_SWAP,
11600                                 &sff8472_swap, NULL);
11601                 if (status)
11602                         return -EIO;
11603
11604                 /* Check if the module requires address swap to access
11605                  * the other EEPROM memory page.
11606                  */
11607                 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
                        PMD_DRV_LOG(WARNING,
                                    "Module address swap to access "
                                    "page 0xA2 is not supported.");
11611                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11612                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11613                 } else if (sff8472_comp == 0x00) {
11614                         /* Module is not SFF-8472 compliant */
11615                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11616                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11617                 } else {
11618                         modinfo->type = RTE_ETH_MODULE_SFF_8472;
11619                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11620                 }
11621                 break;
11622         case I40E_MODULE_TYPE_QSFP_PLUS:
11623                 /* Read from memory page 0. */
11624                 status = i40e_aq_get_phy_register(hw,
11625                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11626                                 0, 1,
11627                                 I40E_MODULE_REVISION_ADDR,
11628                                 &sff8636_rev, NULL);
11629                 if (status)
11630                         return -EIO;
11631                 /* Determine revision compliance byte */
11632                 if (sff8636_rev > 0x02) {
11633                         /* Module is SFF-8636 compliant */
11634                         modinfo->type = RTE_ETH_MODULE_SFF_8636;
11635                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11636                 } else {
11637                         modinfo->type = RTE_ETH_MODULE_SFF_8436;
11638                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11639                 }
11640                 break;
11641         case I40E_MODULE_TYPE_QSFP28:
11642                 modinfo->type = RTE_ETH_MODULE_SFF_8636;
11643                 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11644                 break;
11645         default:
                PMD_DRV_LOG(ERR, "Module type unrecognized");
11647                 return -EINVAL;
11648         }
11649         return 0;
11650 }
11651
11652 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11653                                   struct rte_dev_eeprom_info *info)
11654 {
11655         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11656         bool is_sfp = false;
11657         i40e_status status;
11658         uint8_t *data;
11659         uint32_t value = 0;
11660         uint32_t i;
11661
11662         if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11663                 is_sfp = true;
11664
11665         data = info->data;
11666         for (i = 0; i < info->length; i++) {
11667                 u32 offset = i + info->offset;
11668                 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11669
11670                 /* Check if we need to access the other memory page */
11671                 if (is_sfp) {
11672                         if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11673                                 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11674                                 addr = I40E_I2C_EEPROM_DEV_ADDR2;
11675                         }
11676                 } else {
11677                         while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11678                                 /* Compute memory page number and offset. */
11679                                 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11680                                 addr++;
11681                         }
11682                 }
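                /*
                 * Worked example for the QSFP paging math above (this
                 * assumes the firmware treats addr as the page number
                 * for QSFP modules): flat bytes 0-255 map straight onto
                 * page 0; a flat offset of 384 is reduced twice,
                 * 384 -> 256 -> 128, while addr becomes 2, i.e. byte
                 * 128 of upper page 2.
                 */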
11683                 status = i40e_aq_get_phy_register(hw,
11684                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11685                                 addr, 1, offset, &value, NULL);
11686                 if (status)
11687                         return -EIO;
11688                 data[i] = (uint8_t)value;
11689         }
11690         return 0;
11691 }
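
/*
 * Usage sketch (hypothetical application code): module EEPROM access is a
 * two-step sequence, query the layout first, then dump it:
 *
 *	struct rte_eth_dev_module_info minfo;
 *	struct rte_dev_eeprom_info einfo;
 *	uint8_t *buf;
 *
 *	if (rte_eth_dev_get_module_info(port_id, &minfo) != 0)
 *		return;
 *	buf = malloc(minfo.eeprom_len);
 *	if (buf == NULL)
 *		return;
 *	memset(&einfo, 0, sizeof(einfo));
 *	einfo.length = minfo.eeprom_len;
 *	einfo.data = buf;
 *	if (rte_eth_dev_get_module_eeprom(port_id, &einfo) == 0)
 *		parse_sff(buf, einfo.length);
 *	free(buf);
 *
 * parse_sff() is a placeholder for SFF-8079/8472/8636 decoding.
 */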
11692
11693 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11694                                      struct rte_ether_addr *mac_addr)
11695 {
11696         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11697         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11698         struct i40e_vsi *vsi = pf->main_vsi;
11699         struct i40e_mac_filter_info mac_filter;
11700         struct i40e_mac_filter *f;
11701         int ret;
11702
11703         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
11704                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11705                 return -EINVAL;
11706         }
11707
11708         TAILQ_FOREACH(f, &vsi->mac_list, next) {
11709                 if (rte_is_same_ether_addr(&pf->dev_addr,
11710                                                 &f->mac_info.mac_addr))
11711                         break;
11712         }
11713
11714         if (f == NULL) {
11715                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11716                 return -EIO;
11717         }
11718
11719         mac_filter = f->mac_info;
11720         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11721         if (ret != I40E_SUCCESS) {
11722                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11723                 return -EIO;
11724         }
11725         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11726         ret = i40e_vsi_add_mac(vsi, &mac_filter);
11727         if (ret != I40E_SUCCESS) {
11728                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
11729                 return -EIO;
11730         }
11731         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11732
11733         ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11734                                         mac_addr->addr_bytes, NULL);
11735         if (ret != I40E_SUCCESS) {
11736                 PMD_DRV_LOG(ERR, "Failed to change mac");
11737                 return -EIO;
11738         }
11739
11740         return 0;
11741 }
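
/*
 * Usage sketch (hypothetical application code): the callback above backs
 * rte_eth_dev_default_mac_addr_set(); the address must be a valid unicast
 * address and the previous default filter must still exist:
 *
 *	struct rte_ether_addr addr = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *	};
 *
 *	ret = rte_eth_dev_default_mac_addr_set(port_id, &addr);
 */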
11742
11743 static int
11744 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11745 {
11746         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11747         struct rte_eth_dev_data *dev_data = pf->dev_data;
11748         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11749         int ret = 0;
11750
11751         /* check if mtu is within the allowed range */
11752         if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
11753                 return -EINVAL;
11754
        /* MTU setting is forbidden if the port is started */
11756         if (dev_data->dev_started) {
11757                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11758                             dev_data->port_id);
11759                 return -EBUSY;
11760         }
11761
11762         if (frame_size > I40E_ETH_MAX_LEN)
11763                 dev_data->dev_conf.rxmode.offloads |=
11764                         DEV_RX_OFFLOAD_JUMBO_FRAME;
11765         else
11766                 dev_data->dev_conf.rxmode.offloads &=
11767                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
11768
11769         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11770
11771         return ret;
11772 }
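
/*
 * Worked example (assuming the usual i40e overhead of L2 header + CRC +
 * two VLAN tags, i.e. I40E_ETH_OVERHEAD = 26): an MTU of 9000 yields a
 * frame size of 9026, which is above I40E_ETH_MAX_LEN and so enables the
 * jumbo frame offload. The port must be stopped first:
 *
 *	rte_eth_dev_stop(port_id);
 *	ret = rte_eth_dev_set_mtu(port_id, 9000);
 *	rte_eth_dev_start(port_id);
 */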
11773
11774 /* Restore ethertype filter */
11775 static void
11776 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11777 {
11778         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11779         struct i40e_ethertype_filter_list
11780                 *ethertype_list = &pf->ethertype.ethertype_list;
11781         struct i40e_ethertype_filter *f;
        struct i40e_control_filter_stats stats = {0}; /* keep defined if list is empty */
11783         uint16_t flags;
11784
11785         TAILQ_FOREACH(f, ethertype_list, rules) {
11786                 flags = 0;
11787                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11788                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11789                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11790                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11791                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11792
11793                 memset(&stats, 0, sizeof(stats));
11794                 i40e_aq_add_rem_control_packet_filter(hw,
11795                                             f->input.mac_addr.addr_bytes,
11796                                             f->input.ether_type,
11797                                             flags, pf->main_vsi->seid,
11798                                             f->queue, 1, &stats, NULL);
11799         }
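        /* Note: stats reflect only the last AQ call issued in the loop above */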
11800         PMD_DRV_LOG(INFO, "Ethertype filter:"
11801                     " mac_etype_used = %u, etype_used = %u,"
11802                     " mac_etype_free = %u, etype_free = %u",
11803                     stats.mac_etype_used, stats.etype_used,
11804                     stats.mac_etype_free, stats.etype_free);
11805 }
11806
11807 /* Restore tunnel filter */
11808 static void
11809 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11810 {
11811         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11812         struct i40e_vsi *vsi;
11813         struct i40e_pf_vf *vf;
11814         struct i40e_tunnel_filter_list
11815                 *tunnel_list = &pf->tunnel.tunnel_list;
11816         struct i40e_tunnel_filter *f;
11817         struct i40e_aqc_cloud_filters_element_bb cld_filter;
        bool big_buffer = false;
11819
11820         TAILQ_FOREACH(f, tunnel_list, rules) {
11821                 if (!f->is_to_vf)
11822                         vsi = pf->main_vsi;
11823                 else {
11824                         vf = &pf->vfs[f->vf_id];
11825                         vsi = vf->vsi;
11826                 }
11827                 memset(&cld_filter, 0, sizeof(cld_filter));
11828                 rte_ether_addr_copy((struct rte_ether_addr *)
11829                                 &f->input.outer_mac,
11830                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
11831                 rte_ether_addr_copy((struct rte_ether_addr *)
11832                                 &f->input.inner_mac,
11833                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
11834                 cld_filter.element.inner_vlan = f->input.inner_vlan;
11835                 cld_filter.element.flags = f->input.flags;
11836                 cld_filter.element.tenant_id = f->input.tenant_id;
11837                 cld_filter.element.queue_number = f->queue;
11838                 rte_memcpy(cld_filter.general_fields,
11839                            f->input.general_fields,
11840                            sizeof(f->input.general_fields));
11841
                /* The replaced 0x10/0x11/0x12 filter types need the
                 * big-buffer variant of the AQ command; recompute this
                 * for every filter so one match does not leak into the
                 * next iteration.
                 */
                big_buffer = ((f->input.flags &
                               I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
                              I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
                             ((f->input.flags &
                               I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
                              I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
                             ((f->input.flags &
                               I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
                              I40E_AQC_ADD_CLOUD_FILTER_0X10);
11852
11853                 if (big_buffer)
11854                         i40e_aq_add_cloud_filters_bb(hw,
11855                                         vsi->seid, &cld_filter, 1);
11856                 else
11857                         i40e_aq_add_cloud_filters(hw, vsi->seid,
11858                                                   &cld_filter.element, 1);
11859         }
11860 }
11861
11862 static void
11863 i40e_filter_restore(struct i40e_pf *pf)
11864 {
11865         i40e_ethertype_filter_restore(pf);
11866         i40e_tunnel_filter_restore(pf);
11867         i40e_fdir_filter_restore(pf);
11868         (void)i40e_hash_filter_restore(pf);
11869 }
11870
11871 bool
11872 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11873 {
11874         if (strcmp(dev->device->driver->name, drv->driver.name))
11875                 return false;
11876
11877         return true;
11878 }
11879
11880 bool
11881 is_i40e_supported(struct rte_eth_dev *dev)
11882 {
11883         return is_device_supported(dev, &rte_i40e_pmd);
11884 }
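
/*
 * Usage sketch: the rte_pmd_i40e_* entry points use this check to reject
 * ports not driven by this PMD before touching private data (abridged):
 *
 *	dev = &rte_eth_devices[port];
 *	if (!is_i40e_supported(dev))
 *		return -ENOTSUP;
 *	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 */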
11885
11886 struct i40e_customized_pctype*
11887 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11888 {
11889         int i;
11890
11891         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11892                 if (pf->customized_pctype[i].index == index)
11893                         return &pf->customized_pctype[i];
11894         }
11895         return NULL;
11896 }
11897
11898 static int
11899 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11900                               uint32_t pkg_size, uint32_t proto_num,
11901                               struct rte_pmd_i40e_proto_info *proto,
11902                               enum rte_pmd_i40e_package_op op)
11903 {
11904         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11905         uint32_t pctype_num;
11906         struct rte_pmd_i40e_ptype_info *pctype;
11907         uint32_t buff_size;
11908         struct i40e_customized_pctype *new_pctype = NULL;
11909         uint8_t proto_id;
11910         uint8_t pctype_value;
11911         char name[64];
11912         uint32_t i, j, n;
11913         int ret;
11914
11915         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11916             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11917                 PMD_DRV_LOG(ERR, "Unsupported operation.");
11918                 return -1;
11919         }
11920
11921         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11922                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
11923                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11924         if (ret) {
11925                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
11926                 return -1;
11927         }
11928         if (!pctype_num) {
11929                 PMD_DRV_LOG(INFO, "No new pctype added");
11930                 return -1;
11931         }
11932
11933         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
11934         pctype = rte_zmalloc("new_pctype", buff_size, 0);
11935         if (!pctype) {
11936                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11937                 return -1;
11938         }
11939         /* get information about new pctype list */
11940         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11941                                         (uint8_t *)pctype, buff_size,
11942                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11943         if (ret) {
11944                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
11945                 rte_free(pctype);
11946                 return -1;
11947         }
11948
11949         /* Update customized pctype. */
11950         for (i = 0; i < pctype_num; i++) {
11951                 pctype_value = pctype[i].ptype_id;
11952                 memset(name, 0, sizeof(name));
11953                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11954                         proto_id = pctype[i].protocols[j];
11955                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11956                                 continue;
11957                         for (n = 0; n < proto_num; n++) {
11958                                 if (proto[n].proto_id != proto_id)
11959                                         continue;
11960                                 strlcat(name, proto[n].name, sizeof(name));
11961                                 strlcat(name, "_", sizeof(name));
11962                                 break;
11963                         }
11964                 }
                /* Strip the trailing '_'; guard against an empty name */
                if (name[0] != '\0')
                        name[strlen(name) - 1] = '\0';
                PMD_DRV_LOG(INFO, "name = %s", name);
11967                 if (!strcmp(name, "GTPC"))
11968                         new_pctype =
11969                                 i40e_find_customized_pctype(pf,
11970                                                       I40E_CUSTOMIZED_GTPC);
11971                 else if (!strcmp(name, "GTPU_IPV4"))
11972                         new_pctype =
11973                                 i40e_find_customized_pctype(pf,
11974                                                    I40E_CUSTOMIZED_GTPU_IPV4);
11975                 else if (!strcmp(name, "GTPU_IPV6"))
11976                         new_pctype =
11977                                 i40e_find_customized_pctype(pf,
11978                                                    I40E_CUSTOMIZED_GTPU_IPV6);
11979                 else if (!strcmp(name, "GTPU"))
11980                         new_pctype =
11981                                 i40e_find_customized_pctype(pf,
11982                                                       I40E_CUSTOMIZED_GTPU);
11983                 else if (!strcmp(name, "IPV4_L2TPV3"))
11984                         new_pctype =
11985                                 i40e_find_customized_pctype(pf,
11986                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
11987                 else if (!strcmp(name, "IPV6_L2TPV3"))
11988                         new_pctype =
11989                                 i40e_find_customized_pctype(pf,
11990                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
11991                 else if (!strcmp(name, "IPV4_ESP"))
11992                         new_pctype =
11993                                 i40e_find_customized_pctype(pf,
11994                                                 I40E_CUSTOMIZED_ESP_IPV4);
11995                 else if (!strcmp(name, "IPV6_ESP"))
11996                         new_pctype =
11997                                 i40e_find_customized_pctype(pf,
11998                                                 I40E_CUSTOMIZED_ESP_IPV6);
11999                 else if (!strcmp(name, "IPV4_UDP_ESP"))
12000                         new_pctype =
12001                                 i40e_find_customized_pctype(pf,
12002                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
12003                 else if (!strcmp(name, "IPV6_UDP_ESP"))
12004                         new_pctype =
12005                                 i40e_find_customized_pctype(pf,
12006                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
12007                 else if (!strcmp(name, "IPV4_AH"))
12008                         new_pctype =
12009                                 i40e_find_customized_pctype(pf,
12010                                                 I40E_CUSTOMIZED_AH_IPV4);
12011                 else if (!strcmp(name, "IPV6_AH"))
12012                         new_pctype =
12013                                 i40e_find_customized_pctype(pf,
12014                                                 I40E_CUSTOMIZED_AH_IPV6);
12015                 if (new_pctype) {
12016                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
12017                                 new_pctype->pctype = pctype_value;
12018                                 new_pctype->valid = true;
12019                         } else {
12020                                 new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
12021                                 new_pctype->valid = false;
12022                         }
12023                 }
12024         }
12025
12026         rte_free(pctype);
12027         return 0;
12028 }
12029
12030 static int
12031 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
12032                              uint32_t pkg_size, uint32_t proto_num,
12033                              struct rte_pmd_i40e_proto_info *proto,
12034                              enum rte_pmd_i40e_package_op op)
12035 {
12036         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
12037         uint16_t port_id = dev->data->port_id;
12038         uint32_t ptype_num;
12039         struct rte_pmd_i40e_ptype_info *ptype;
12040         uint32_t buff_size;
12041         uint8_t proto_id;
12042         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
12043         uint32_t i, j, n;
12044         bool in_tunnel;
12045         int ret;
12046
12047         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12048             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12049                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12050                 return -1;
12051         }
12052
12053         if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
12054                 rte_pmd_i40e_ptype_mapping_reset(port_id);
12055                 return 0;
12056         }
12057
12058         /* get information about new ptype num */
12059         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12060                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
12061                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
12062         if (ret) {
12063                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
12064                 return ret;
12065         }
12066         if (!ptype_num) {
12067                 PMD_DRV_LOG(INFO, "No new ptype added");
12068                 return -1;
12069         }
12070
12071         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
12072         ptype = rte_zmalloc("new_ptype", buff_size, 0);
12073         if (!ptype) {
12074                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12075                 return -1;
12076         }
12077
12078         /* get information about new ptype list */
12079         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12080                                         (uint8_t *)ptype, buff_size,
12081                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
12082         if (ret) {
12083                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
12084                 rte_free(ptype);
12085                 return ret;
12086         }
12087
12088         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
12089         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
12090         if (!ptype_mapping) {
12091                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12092                 rte_free(ptype);
12093                 return -1;
12094         }
12095
12096         /* Update ptype mapping table. */
12097         for (i = 0; i < ptype_num; i++) {
12098                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
12099                 ptype_mapping[i].sw_ptype = 0;
12100                 in_tunnel = false;
12101                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12102                         proto_id = ptype[i].protocols[j];
12103                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12104                                 continue;
12105                         for (n = 0; n < proto_num; n++) {
12106                                 if (proto[n].proto_id != proto_id)
12107                                         continue;
12108                                 memset(name, 0, sizeof(name));
                                strlcpy(name, proto[n].name, sizeof(name));
                                PMD_DRV_LOG(INFO, "name = %s", name);
12111                                 if (!strncasecmp(name, "PPPOE", 5))
12112                                         ptype_mapping[i].sw_ptype |=
12113                                                 RTE_PTYPE_L2_ETHER_PPPOE;
12114                                 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12115                                          !in_tunnel) {
12116                                         ptype_mapping[i].sw_ptype |=
12117                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12118                                         ptype_mapping[i].sw_ptype |=
12119                                                 RTE_PTYPE_L4_FRAG;
12120                                 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12121                                            in_tunnel) {
12122                                         ptype_mapping[i].sw_ptype |=
12123                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12124                                         ptype_mapping[i].sw_ptype |=
12125                                                 RTE_PTYPE_INNER_L4_FRAG;
12126                                 } else if (!strncasecmp(name, "OIPV4", 5)) {
12127                                         ptype_mapping[i].sw_ptype |=
12128                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12129                                         in_tunnel = true;
12130                                 } else if (!strncasecmp(name, "IPV4", 4) &&
12131                                            !in_tunnel)
12132                                         ptype_mapping[i].sw_ptype |=
12133                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12134                                 else if (!strncasecmp(name, "IPV4", 4) &&
12135                                          in_tunnel)
12136                                         ptype_mapping[i].sw_ptype |=
12137                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12138                                 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12139                                          !in_tunnel) {
12140                                         ptype_mapping[i].sw_ptype |=
12141                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12142                                         ptype_mapping[i].sw_ptype |=
12143                                                 RTE_PTYPE_L4_FRAG;
12144                                 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12145                                            in_tunnel) {
12146                                         ptype_mapping[i].sw_ptype |=
12147                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12148                                         ptype_mapping[i].sw_ptype |=
12149                                                 RTE_PTYPE_INNER_L4_FRAG;
12150                                 } else if (!strncasecmp(name, "OIPV6", 5)) {
12151                                         ptype_mapping[i].sw_ptype |=
12152                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12153                                         in_tunnel = true;
12154                                 } else if (!strncasecmp(name, "IPV6", 4) &&
12155                                            !in_tunnel)
12156                                         ptype_mapping[i].sw_ptype |=
12157                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12158                                 else if (!strncasecmp(name, "IPV6", 4) &&
12159                                          in_tunnel)
12160                                         ptype_mapping[i].sw_ptype |=
12161                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12162                                 else if (!strncasecmp(name, "UDP", 3) &&
12163                                          !in_tunnel)
12164                                         ptype_mapping[i].sw_ptype |=
12165                                                 RTE_PTYPE_L4_UDP;
12166                                 else if (!strncasecmp(name, "UDP", 3) &&
12167                                          in_tunnel)
12168                                         ptype_mapping[i].sw_ptype |=
12169                                                 RTE_PTYPE_INNER_L4_UDP;
12170                                 else if (!strncasecmp(name, "TCP", 3) &&
12171                                          !in_tunnel)
12172                                         ptype_mapping[i].sw_ptype |=
12173                                                 RTE_PTYPE_L4_TCP;
12174                                 else if (!strncasecmp(name, "TCP", 3) &&
12175                                          in_tunnel)
12176                                         ptype_mapping[i].sw_ptype |=
12177                                                 RTE_PTYPE_INNER_L4_TCP;
12178                                 else if (!strncasecmp(name, "SCTP", 4) &&
12179                                          !in_tunnel)
12180                                         ptype_mapping[i].sw_ptype |=
12181                                                 RTE_PTYPE_L4_SCTP;
12182                                 else if (!strncasecmp(name, "SCTP", 4) &&
12183                                          in_tunnel)
12184                                         ptype_mapping[i].sw_ptype |=
12185                                                 RTE_PTYPE_INNER_L4_SCTP;
12186                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12187                                           !strncasecmp(name, "ICMPV6", 6)) &&
12188                                          !in_tunnel)
12189                                         ptype_mapping[i].sw_ptype |=
12190                                                 RTE_PTYPE_L4_ICMP;
12191                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12192                                           !strncasecmp(name, "ICMPV6", 6)) &&
12193                                          in_tunnel)
12194                                         ptype_mapping[i].sw_ptype |=
12195                                                 RTE_PTYPE_INNER_L4_ICMP;
12196                                 else if (!strncasecmp(name, "GTPC", 4)) {
12197                                         ptype_mapping[i].sw_ptype |=
12198                                                 RTE_PTYPE_TUNNEL_GTPC;
12199                                         in_tunnel = true;
12200                                 } else if (!strncasecmp(name, "GTPU", 4)) {
12201                                         ptype_mapping[i].sw_ptype |=
12202                                                 RTE_PTYPE_TUNNEL_GTPU;
12203                                         in_tunnel = true;
12204                                 } else if (!strncasecmp(name, "ESP", 3)) {
12205                                         ptype_mapping[i].sw_ptype |=
12206                                                 RTE_PTYPE_TUNNEL_ESP;
12207                                         in_tunnel = true;
12208                                 } else if (!strncasecmp(name, "GRENAT", 6)) {
12209                                         ptype_mapping[i].sw_ptype |=
12210                                                 RTE_PTYPE_TUNNEL_GRENAT;
12211                                         in_tunnel = true;
12212                                 } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12213                                            !strncasecmp(name, "L2TPV2", 6) ||
12214                                            !strncasecmp(name, "L2TPV3", 6)) {
12215                                         ptype_mapping[i].sw_ptype |=
12216                                                 RTE_PTYPE_TUNNEL_L2TP;
12217                                         in_tunnel = true;
12218                                 }
12219
12220                                 break;
12221                         }
12222                 }
12223         }
12224
12225         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
12226                                                 ptype_num, 0);
12227         if (ret)
12228                 PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
12229
12230         rte_free(ptype_mapping);
12231         rte_free(ptype);
12232         return ret;
12233 }
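
/*
 * Usage sketch (hypothetical application code): the mapping installed by
 * rte_pmd_i40e_ptype_mapping_update() above can be read back to verify
 * what the DDP profile provided:
 *
 *	struct rte_pmd_i40e_ptype_mapping map[64];
 *	uint16_t count = 0;
 *
 *	if (rte_pmd_i40e_ptype_mapping_get(port_id, map, 64, &count, 1) == 0)
 *		printf("%u valid ptype mappings\n", count);
 */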
12234
12235 void
12236 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
12237                             uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
12238 {
12239         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12240         uint32_t proto_num;
12241         struct rte_pmd_i40e_proto_info *proto;
12242         uint32_t buff_size;
12243         uint32_t i;
12244         int ret;
12245
12246         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12247             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12248                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12249                 return;
12250         }
12251
12252         /* get information about protocol number */
12253         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12254                                        (uint8_t *)&proto_num, sizeof(proto_num),
12255                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
12256         if (ret) {
12257                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
12258                 return;
12259         }
12260         if (!proto_num) {
12261                 PMD_DRV_LOG(INFO, "No new protocol added");
12262                 return;
12263         }
12264
12265         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
12266         proto = rte_zmalloc("new_proto", buff_size, 0);
12267         if (!proto) {
12268                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12269                 return;
12270         }
12271
12272         /* get information about protocol list */
12273         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12274                                         (uint8_t *)proto, buff_size,
12275                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
12276         if (ret) {
12277                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
12278                 rte_free(proto);
12279                 return;
12280         }
12281
12282         /* Check if GTP is supported. */
12283         for (i = 0; i < proto_num; i++) {
12284                 if (!strncmp(proto[i].name, "GTP", 3)) {
12285                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12286                                 pf->gtp_support = true;
12287                         else
12288                                 pf->gtp_support = false;
12289                         break;
12290                 }
12291         }
12292
12293         /* Check if ESP is supported. */
12294         for (i = 0; i < proto_num; i++) {
12295                 if (!strncmp(proto[i].name, "ESP", 3)) {
12296                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12297                                 pf->esp_support = true;
12298                         else
12299                                 pf->esp_support = false;
12300                         break;
12301                 }
12302         }
12303
12304         /* Update customized pctype info */
12305         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12306                                             proto_num, proto, op);
12307         if (ret)
12308                 PMD_DRV_LOG(INFO, "No pctype is updated.");
12309
12310         /* Update customized ptype info */
12311         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12312                                            proto_num, proto, op);
12313         if (ret)
12314                 PMD_DRV_LOG(INFO, "No ptype is updated.");
12315
12316         rte_free(proto);
12317 }
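
/*
 * Usage sketch (hypothetical application code): this function runs as part
 * of DDP package processing, e.g. when an application loads a profile from
 * disk (read_file() is a placeholder):
 *
 *	buf = read_file("profile.pkgo", &size);
 *	ret = rte_pmd_i40e_process_ddp_package(port_id, buf, size,
 *					       RTE_PMD_I40E_PKG_OP_WR_ADD);
 */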
12318
12319 /* Create a QinQ cloud filter
12320  *
12321  * The Fortville NIC has limited resources for tunnel filters,
12322  * so we can only reuse existing filters.
12323  *
12324  * In step 1 we define which Field Vector fields can be used for
12325  * filter types.
12326  * As we do not have the inner tag defined as a field,
12327  * we have to define it first, by reusing one of L1 entries.
12328  *
12329  * In step 2 we are replacing one of existing filter types with
12330  * a new one for QinQ.
 * As we are reusing L1 and replacing L2, some of the default filter
 * types will disappear, which depends on the L1 and L2 entries we reuse.
12333  *
12334  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
12335  *
12336  * 1.   Create L1 filter of outer vlan (12b) which will be in use
12337  *              later when we define the cloud filter.
12338  *      a.      Valid_flags.replace_cloud = 0
12339  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
12340  *      c.      New_filter = 0x10
12341  *      d.      TR bit = 0xff (optional, not used here)
12342  *      e.      Buffer – 2 entries:
12343  *              i.      Byte 0 = 8 (outer vlan FV index).
12344  *                      Byte 1 = 0 (rsv)
12345  *                      Byte 2-3 = 0x0fff
12346  *              ii.     Byte 0 = 37 (inner vlan FV index).
 *                      Byte 1 = 0 (rsv)
12348  *                      Byte 2-3 = 0x0fff
12349  *
12350  * Step 2:
12351  * 2.   Create cloud filter using two L1 filters entries: stag and
12352  *              new filter(outer vlan+ inner vlan)
12353  *      a.      Valid_flags.replace_cloud = 1
12354  *      b.      Old_filter = 1 (instead of outer IP)
12355  *      c.      New_filter = 0x10
12356  *      d.      Buffer – 2 entries:
12357  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
12358  *                      Byte 1-3 = 0 (rsv)
12359  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
12360  *                      Byte 9-11 = 0 (rsv)
12361  */
12362 static int
12363 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
12364 {
12365         int ret = -ENOTSUP;
12366         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
12367         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
12368         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12369         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
12370
12371         if (pf->support_multi_driver) {
12372                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
12373                 return ret;
12374         }
12375
12376         /* Init */
12377         memset(&filter_replace, 0,
12378                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12379         memset(&filter_replace_buf, 0,
12380                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12381
12382         /* create L1 filter */
12383         filter_replace.old_filter_type =
12384                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
12385         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12386         filter_replace.tr_bit = 0;
12387
12388         /* Prepare the buffer, 2 entries */
12389         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
12390         filter_replace_buf.data[0] |=
12391                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12392         /* Field Vector 12b mask */
12393         filter_replace_buf.data[2] = 0xff;
12394         filter_replace_buf.data[3] = 0x0f;
12395         filter_replace_buf.data[4] =
12396                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
12397         filter_replace_buf.data[4] |=
12398                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12399         /* Field Vector 12b mask */
12400         filter_replace_buf.data[6] = 0xff;
12401         filter_replace_buf.data[7] = 0x0f;
12402         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12403                         &filter_replace_buf);
12404         if (ret != I40E_SUCCESS)
12405                 return ret;
12406
12407         if (filter_replace.old_filter_type !=
12408             filter_replace.new_filter_type)
12409                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
12410                             " original: 0x%x, new: 0x%x",
12411                             dev->device->name,
12412                             filter_replace.old_filter_type,
12413                             filter_replace.new_filter_type);
12414
12415         /* Apply the second L2 cloud filter */
12416         memset(&filter_replace, 0,
12417                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12418         memset(&filter_replace_buf, 0,
12419                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12420
12421         /* create L2 filter, input for L2 filter will be L1 filter  */
12422         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
12423         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
12424         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12425
12426         /* Prepare the buffer, 2 entries */
12427         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
12428         filter_replace_buf.data[0] |=
12429                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12430         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12431         filter_replace_buf.data[4] |=
12432                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12433         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12434                         &filter_replace_buf);
12435         if (!ret && (filter_replace.old_filter_type !=
12436                      filter_replace.new_filter_type))
12437                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
12438                             " original: 0x%x, new: 0x%x",
12439                             dev->device->name,
12440                             filter_replace.old_filter_type,
12441                             filter_replace.new_filter_type);
12442
12443         return ret;
12444 }
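
/*
 * Usage sketch (hypothetical testpmd-style rule): once the L1/L2 filter
 * types have been replaced above, a QinQ rule keyed on the outer and
 * inner VLAN IDs can be offloaded, along the lines of:
 *
 *	flow create 0 ingress pattern eth / vlan tci is 257 /
 *		vlan tci is 2 / end actions queue index 1 / end
 */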
12445
12446 RTE_LOG_REGISTER(i40e_logtype_init, pmd.net.i40e.init, NOTICE);
12447 RTE_LOG_REGISTER(i40e_logtype_driver, pmd.net.i40e.driver, NOTICE);
12448 #ifdef RTE_ETHDEV_DEBUG_RX
12449 RTE_LOG_REGISTER(i40e_logtype_rx, pmd.net.i40e.rx, DEBUG);
12450 #endif
12451 #ifdef RTE_ETHDEV_DEBUG_TX
12452 RTE_LOG_REGISTER(i40e_logtype_tx, pmd.net.i40e.tx, DEBUG);
12453 #endif
12454
12455 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12456                               ETH_I40E_FLOATING_VEB_ARG "=1"
12457                               ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
12458                               ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12459                               ETH_I40E_SUPPORT_MULTI_DRIVER "=1");