net/i40e: fix multiple driver support
[dpdk.git] / drivers / net / i40e / i40e_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "base/i40e_diag.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"

#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
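/* Note: the default high and low water marks are identical; 0xF2000
 * bytes equals I40E_RXPBSIZE (968 KB), so both default to the full Rx
 * packet buffer size expressed in kilobytes.
 */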

/* Receive Average Packet Size in Bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
                I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_GRST_MASK | \
                I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
                I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
                I40E_PFINT_ICR0_ENA_VFLR_MASK | \
                I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
        (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
        (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
        (1UL << RTE_ETH_FLOW_L2_PAYLOAD))

/* Additional timesync values. */
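/* Per-link-speed increment values for the timesync cycle counter. The
 * 1G value is ~10x the 10G value, which is 2x the 40G value, which
 * suggests the sampling clock ticks more slowly at lower link speeds
 * (rationale inferred from the values themselves, not the datasheet).
 */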
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL

/**
 * Below are values, suggested by silicon experts, for writing
 * registers that are not publicly exposed.
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static int  i40e_dev_reset(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
                               struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
                                     struct rte_eth_xstat_name *xstats_names,
                                     unsigned limit);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
                                            uint16_t queue_id,
                                            uint8_t stat_idx,
                                            uint8_t is_rx);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
                                char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id,
                                int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
                              enum rte_vlan_type vlan_type,
                              uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
                                      uint16_t queue,
                                      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                                       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
                            struct ether_addr *mac_addr,
                            uint32_t index,
                            uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
                bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
                               uint32_t hireg,
                               uint32_t loreg,
                               bool offset_loaded,
                               uint64_t *offset,
                               uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
                                uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
                        uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
                        uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
                                                struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
                                enum rte_filter_type filter_type,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                                  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
                                                     uint16_t seid,
                                                     uint16_t rule_type,
                                                     uint16_t *entries,
                                                     uint16_t count,
                                                     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp,
                                           uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
                                    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                         uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
                         struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
                           struct rte_dev_eeprom_info *eeprom);

static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
        const struct rte_eth_ethertype_filter *input,
        struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
                                   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
        struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
        struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
                                struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

int i40e_logtype_init;
int i40e_logtype_driver;

static const struct rte_pci_id pci_id_i40e_map[] = {
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
        .dev_configure                = i40e_dev_configure,
        .dev_start                    = i40e_dev_start,
        .dev_stop                     = i40e_dev_stop,
        .dev_close                    = i40e_dev_close,
        .dev_reset                    = i40e_dev_reset,
        .promiscuous_enable           = i40e_dev_promiscuous_enable,
        .promiscuous_disable          = i40e_dev_promiscuous_disable,
        .allmulticast_enable          = i40e_dev_allmulticast_enable,
        .allmulticast_disable         = i40e_dev_allmulticast_disable,
        .dev_set_link_up              = i40e_dev_set_link_up,
        .dev_set_link_down            = i40e_dev_set_link_down,
        .link_update                  = i40e_dev_link_update,
        .stats_get                    = i40e_dev_stats_get,
        .xstats_get                   = i40e_dev_xstats_get,
        .xstats_get_names             = i40e_dev_xstats_get_names,
        .stats_reset                  = i40e_dev_stats_reset,
        .xstats_reset                 = i40e_dev_stats_reset,
        .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
        .fw_version_get               = i40e_fw_version_get,
        .dev_infos_get                = i40e_dev_info_get,
        .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
        .vlan_tpid_set                = i40e_vlan_tpid_set,
        .vlan_offload_set             = i40e_vlan_offload_set,
        .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
        .vlan_pvid_set                = i40e_vlan_pvid_set,
        .rx_queue_start               = i40e_dev_rx_queue_start,
        .rx_queue_stop                = i40e_dev_rx_queue_stop,
        .tx_queue_start               = i40e_dev_tx_queue_start,
        .tx_queue_stop                = i40e_dev_tx_queue_stop,
        .rx_queue_setup               = i40e_dev_rx_queue_setup,
        .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
        .rx_queue_release             = i40e_dev_rx_queue_release,
        .rx_queue_count               = i40e_dev_rx_queue_count,
        .rx_descriptor_done           = i40e_dev_rx_descriptor_done,
        .rx_descriptor_status         = i40e_dev_rx_descriptor_status,
        .tx_descriptor_status         = i40e_dev_tx_descriptor_status,
        .tx_queue_setup               = i40e_dev_tx_queue_setup,
        .tx_queue_release             = i40e_dev_tx_queue_release,
        .dev_led_on                   = i40e_dev_led_on,
        .dev_led_off                  = i40e_dev_led_off,
        .flow_ctrl_get                = i40e_flow_ctrl_get,
        .flow_ctrl_set                = i40e_flow_ctrl_set,
        .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
        .mac_addr_add                 = i40e_macaddr_add,
        .mac_addr_remove              = i40e_macaddr_remove,
        .reta_update                  = i40e_dev_rss_reta_update,
        .reta_query                   = i40e_dev_rss_reta_query,
        .rss_hash_update              = i40e_dev_rss_hash_update,
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
        .filter_ctrl                  = i40e_dev_filter_ctrl,
        .rxq_info_get                 = i40e_rxq_info_get,
        .txq_info_get                 = i40e_txq_info_get,
        .mirror_rule_set              = i40e_mirror_rule_set,
        .mirror_rule_reset            = i40e_mirror_rule_reset,
        .timesync_enable              = i40e_timesync_enable,
        .timesync_disable             = i40e_timesync_disable,
        .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
        .get_dcb_info                 = i40e_dev_get_dcb_info,
        .timesync_adjust_time         = i40e_timesync_adjust_time,
        .timesync_read_time           = i40e_timesync_read_time,
        .timesync_write_time          = i40e_timesync_write_time,
        .get_reg                      = i40e_get_regs,
        .get_eeprom_length            = i40e_get_eeprom_length,
        .get_eeprom                   = i40e_get_eeprom,
        .mac_addr_set                 = i40e_set_default_mac_addr,
        .mtu_set                      = i40e_dev_mtu_set,
        .tm_ops_get                   = i40e_tm_ops_get,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
        {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
        {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
        {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
        {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
        {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
                rx_unknown_protocol)},
        {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
        {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
        {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
        {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
                sizeof(rte_i40e_stats_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
        {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
                tx_dropped_link_down)},
        {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
        {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
                illegal_bytes)},
        {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
        {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
                mac_local_faults)},
        {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
                mac_remote_faults)},
        {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
                rx_length_errors)},
        {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
        {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
        {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
        {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
        {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
        {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_127)},
        {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_255)},
        {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_511)},
        {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1023)},
        {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1522)},
        {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_big)},
        {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
                rx_undersize)},
        {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
                rx_oversize)},
        {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
                mac_short_packet_dropped)},
        {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
                rx_fragments)},
        {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
        {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
        {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_127)},
        {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_255)},
        {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_511)},
        {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1023)},
        {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1522)},
        {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_big)},
        {"rx_flow_director_atr_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_atr_match)},
        {"rx_flow_director_sb_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_sb_match)},
        {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                tx_lpi_status)},
        {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                rx_lpi_status)},
        {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                tx_lpi_count)},
        {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
                sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_rx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
                sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_tx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_tx)},
        {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
                sizeof(rte_i40e_txq_prio_strings[0]))

static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct i40e_adapter), eth_i40e_dev_init);
}

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
        .id_table = pci_id_i40e_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                     RTE_PCI_DRV_IOVA_AS_VA,
        .probe = eth_i40e_pci_probe,
        .remove = eth_i40e_pci_remove,
};

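/* The two helpers below read/write the cached link status with a
 * single 64-bit compare-and-set, so a reader never observes a torn
 * rte_eth_link; this relies on the structure fitting into 64 bits.
 */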
static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                     struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                      struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

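/* Thin wrapper around i40e_write_rx_ctl() that logs every write, since
 * these registers are global: a change affects every port (and any
 * other driver) sharing the device.
 */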
static inline void
i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
        i40e_write_rx_ctl(hw, reg_addr, reg_val);
        PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
                    "with value 0x%08x",
                    reg_addr, reg_val);
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
        /*
         * Initialize registers for parsing packet type of QinQ
         * This should be removed from code once proper
         * configuration API is added to avoid configuration conflicts
         * between ports of the same device.
         */
        I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
        I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
        i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
        int ret;

        ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
                                I40E_FLOW_CONTROL_ETHERTYPE, flags,
                                pf->main_vsi_seid, 0,
                                TRUE, NULL, NULL);
        if (ret)
                PMD_INIT_LOG(ERR,
                        "Failed to add filter to drop flow control frames from VSIs.");
}

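/* Parser for the floating_veb_list devarg: a ';'-separated list of VF
 * ids and id ranges that should attach to the floating VEB, e.g.
 * (with a hypothetical PCI address)
 *   -w 0000:02:00.0,enable_floating_veb=1,floating_veb_list=1;3-5
 * Any VF not listed stays on the legacy VEB.
 */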
static int
floating_veb_list_handler(__rte_unused const char *key,
                          const char *floating_veb_value,
                          void *opaque)
{
        int idx = 0;
        unsigned int count = 0;
        char *end = NULL;
        int min, max;
        bool *vf_floating_veb = opaque;

        while (isblank(*floating_veb_value))
                floating_veb_value++;

        /* Reset floating VEB configuration for VFs */
        for (idx = 0; idx < I40E_MAX_VF; idx++)
                vf_floating_veb[idx] = false;

        min = I40E_MAX_VF;
        do {
                while (isblank(*floating_veb_value))
                        floating_veb_value++;
                if (*floating_veb_value == '\0')
                        return -1;
                errno = 0;
                idx = strtoul(floating_veb_value, &end, 10);
                if (errno || end == NULL)
                        return -1;
                while (isblank(*end))
                        end++;
                if (*end == '-') {
                        min = idx;
                } else if ((*end == ';') || (*end == '\0')) {
                        max = idx;
                        if (min == I40E_MAX_VF)
                                min = idx;
                        if (max >= I40E_MAX_VF)
                                max = I40E_MAX_VF - 1;
                        for (idx = min; idx <= max; idx++) {
                                vf_floating_veb[idx] = true;
                                count++;
                        }
                        min = I40E_MAX_VF;
                } else {
                        return -1;
                }
                floating_veb_value = end + 1;
        } while (*end != '\0');

        if (count == 0)
                return -1;

        return 0;
}

static void
config_vf_floating_veb(struct rte_devargs *devargs,
                       uint16_t floating_veb,
                       bool *vf_floating_veb)
{
        struct rte_kvargs *kvlist;
        int i;
        const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

        if (!floating_veb)
                return;
        /* All the VFs attach to the floating VEB by default
         * when the floating VEB is enabled.
         */
        for (i = 0; i < I40E_MAX_VF; i++)
                vf_floating_veb[i] = true;

        if (devargs == NULL)
                return;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return;

        if (!rte_kvargs_count(kvlist, floating_veb_list)) {
                rte_kvargs_free(kvlist);
                return;
        }
        /* When the floating_veb_list parameter exists, all the VFs
         * will attach to the legacy VEB firstly, then configure VFs
         * to the floating VEB according to the floating_veb_list.
         */
        if (rte_kvargs_process(kvlist, floating_veb_list,
                               floating_veb_list_handler,
                               vf_floating_veb) < 0) {
                rte_kvargs_free(kvlist);
                return;
        }
        rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
                            const char *value,
                            __rte_unused void *opaque)
{
        if (strcmp(value, "1"))
                return -1;

        return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, floating_veb_key)) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        /* Floating VEB is enabled when there's key-value:
         * enable_floating_veb=1
         */
        if (rte_kvargs_process(kvlist, floating_veb_key,
                               i40e_check_floating_handler, NULL) < 0) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        rte_kvargs_free(kvlist);

        return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

        if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
                pf->floating_veb =
                        is_floating_veb_supported(pci_dev->device.devargs);
                config_vf_floating_veb(pci_dev->device.devargs,
                                       pf->floating_veb,
                                       pf->floating_veb_list);
        } else {
                pf->floating_veb = false;
        }
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

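/* Each of the init helpers below builds a software shadow of one
 * filter table: a TAILQ of rules plus an rte_hash keyed by the filter
 * input, so rules can be looked up and later restored without
 * querying the hardware.
 */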
static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
        char ethertype_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters ethertype_hash_params = {
                .name = ethertype_hash_name,
                .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
                .key_len = sizeof(struct i40e_ethertype_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize ethertype filter rule list and hash */
        TAILQ_INIT(&ethertype_rule->ethertype_list);
        snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
                 "ethertype_%s", dev->device->name);
        ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
        if (!ethertype_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
                return -EINVAL;
        }
        ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
                                       sizeof(struct i40e_ethertype_filter *) *
                                       I40E_MAX_ETHERTYPE_FILTER_NUM,
                                       0);
        if (!ethertype_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for ethertype hash map!");
                ret = -ENOMEM;
                goto err_ethertype_hash_map_alloc;
        }

        return 0;

err_ethertype_hash_map_alloc:
        rte_hash_free(ethertype_rule->hash_table);

        return ret;
}

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
        char tunnel_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters tunnel_hash_params = {
                .name = tunnel_hash_name,
                .entries = I40E_MAX_TUNNEL_FILTER_NUM,
                .key_len = sizeof(struct i40e_tunnel_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize tunnel filter rule list and hash */
        TAILQ_INIT(&tunnel_rule->tunnel_list);
        snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
                 "tunnel_%s", dev->device->name);
        tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
        if (!tunnel_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
                return -EINVAL;
        }
        tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
                                    sizeof(struct i40e_tunnel_filter *) *
                                    I40E_MAX_TUNNEL_FILTER_NUM,
                                    0);
        if (!tunnel_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for tunnel hash map!");
                ret = -ENOMEM;
                goto err_tunnel_hash_map_alloc;
        }

        return 0;

err_tunnel_hash_map_alloc:
        rte_hash_free(tunnel_rule->hash_table);

        return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = I40E_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct i40e_fdir_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize flow director filter rule list and hash */
        TAILQ_INIT(&fdir_info->fdir_list);
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
                                          sizeof(struct i40e_fdir_filter *) *
                                          I40E_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

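/* Reset the customized packet type table (GTP-C, GTP-U, ...). Entries
 * are marked invalid here; they are expected to be filled in later,
 * e.g. when a DDP profile adding those packet types is loaded
 * (assumption based on the usual i40e DDP workflow).
 */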
static void
i40e_init_customized_info(struct i40e_pf *pf)
{
        int i;

        /* Initialize customized pctype */
        for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
                pf->customized_pctype[i].index = i;
                pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
                pf->customized_pctype[i].valid = false;
        }

        pf->gtp_support = false;
}

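/* Clear both the PFQF_HREGION registers and the driver's queue-region
 * bookkeeping, so the port starts with no queue regions configured.
 */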
void
i40e_init_queue_region_conf(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_queue_regions *info = &pf->queue_region;
        uint16_t i;

        for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
                i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);

        memset(info, 0, sizeof(struct i40e_queue_regions));
}

#define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"

static int
i40e_parse_multi_drv_handler(__rte_unused const char *key,
                               const char *value,
                               void *opaque)
{
        struct i40e_pf *pf;
        unsigned long support_multi_driver;
        char *end;

        pf = (struct i40e_pf *)opaque;

        errno = 0;
        support_multi_driver = strtoul(value, &end, 10);
        if (errno != 0 || end == value || *end != 0) {
                PMD_DRV_LOG(WARNING, "Wrong global configuration");
                return -(EINVAL);
        }

        if (support_multi_driver == 1 || support_multi_driver == 0)
                pf->support_multi_driver = (bool)support_multi_driver;
        else
                PMD_DRV_LOG(WARNING,
                            "%s must be 1 or 0; enabling global configuration by default",
                            ETH_I40E_SUPPORT_MULTI_DRIVER);
        return 0;
}

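/* Check the "support-multi-driver" devarg, e.g. (with a hypothetical
 * PCI address):
 *   -w 0000:02:00.0,support-multi-driver=1
 * When set to 1, this PMD avoids reprogramming global registers that
 * are shared with other i40e drivers bound to the same NIC; see the
 * !pf->support_multi_driver guards in eth_i40e_dev_init().
 */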
static int
i40e_support_multi_driver(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        static const char *const valid_keys[] = {
                ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
        struct rte_kvargs *kvlist;

        /* Enable global configuration by default */
        pf->support_multi_driver = false;

        if (!dev->device->devargs)
                return 0;

        kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
        if (!kvlist)
                return -EINVAL;

        if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
                PMD_DRV_LOG(WARNING, "More than one \"%s\" argument given; "
                            "only the first invalid or last valid one is used!",
                            ETH_I40E_SUPPORT_MULTI_DRIVER);

        rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
                           i40e_parse_multi_drv_handler, pf);
        rte_kvargs_free(kvlist);
        return 0;
}

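/* Main PF init path: set up the ops table and burst functions, reset
 * the PF, bring up the admin queue and the HMC, validate the permanent
 * MAC address, and (unless multi-driver support is requested) program
 * the global registers shared between ports.
 */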
static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev;
        struct rte_intr_handle *intr_handle;
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi;
        int ret;
        uint32_t len;
        uint8_t aq_fail = 0;

        PMD_INIT_FUNC_TRACE();

        dev->dev_ops = &i40e_eth_dev_ops;
        dev->rx_pkt_burst = i40e_recv_pkts;
        dev->tx_pkt_burst = i40e_xmit_pkts;
        dev->tx_pkt_prepare = i40e_prep_pkts;

        /* For secondary processes, we don't initialise any further as the
         * primary has already done this work. Only check that we don't need
         * a different RX/TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                i40e_set_rx_function(dev);
                i40e_set_tx_function(dev);
                return 0;
        }
        i40e_set_default_ptype_table(dev);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        intr_handle = &pci_dev->intr_handle;

        rte_eth_copy_pci_info(dev, pci_dev);

        pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        pf->adapter->eth_dev = dev;
        pf->dev_data = dev->data;

        hw->back = I40E_PF_TO_ADAPTER(pf);
        hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
        if (!hw->hw_addr) {
                PMD_INIT_LOG(ERR,
                        "Hardware is not available, as address is NULL");
                return -ENODEV;
        }

        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->bus.device = pci_dev->addr.devid;
        hw->bus.func = pci_dev->addr.function;
        hw->adapter_stopped = 0;

        /* Check whether multi-driver support is requested */
        i40e_support_multi_driver(dev);

        /* Make sure all is clean before doing PF reset */
        i40e_clear_hw(hw);

        /* Initialize the hardware */
        i40e_hw_init(dev);

        /* Reset here to make sure all is clean for each PF */
        ret = i40e_pf_reset(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
                return ret;
        }

        /* Initialize the shared code (base driver) */
        ret = i40e_init_shared_code(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
                return ret;
        }

        i40e_set_default_pctype_table(dev);

        /*
         * Work around an NVM issue: initialize the registers used for
         * QinQ packet type parsing in software. This should be removed
         * once the issue is fixed in NVM.
         */
        if (!pf->support_multi_driver)
                i40e_GLQF_reg_init(hw);

        /* Initialize the input set for filters (hash and fd) to default values */
        i40e_filter_input_set_init(pf);

        /* Initialize the parameters for adminq */
        i40e_init_adminq_parameter(hw);
        ret = i40e_init_adminq(hw);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
                return -EIO;
        }
        PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
                     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
                     hw->aq.api_maj_ver, hw->aq.api_min_ver,
                     ((hw->nvm.version >> 12) & 0xf),
                     ((hw->nvm.version >> 4) & 0xff),
                     (hw->nvm.version & 0xf), hw->nvm.eetrack);

        /* Initialize the L3_MAP register */
        if (!pf->support_multi_driver) {
                ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
                                                   0x00000028, NULL);
                if (ret)
                        PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
                                     ret);
                PMD_INIT_LOG(DEBUG,
                             "Global register 0x%08x is changed with 0x28",
                             I40E_GLQF_L3_MAP(40));
                i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER);
        }

        /* Need the special FW version to support floating VEB */
        config_floating_veb(dev);
        /* Clear PXE mode */
        i40e_clear_pxe_mode(hw);
        i40e_dev_sync_phy_type(hw);

        /*
         * On X710, performance is far below expectations with recent
         * firmware versions, and the fix may not be integrated into the
         * next firmware release. A software workaround is therefore
         * needed: modify the initial values of three internal-only
         * registers. This workaround can be removed once the issue is
         * fixed in firmware.
         */
        i40e_configure_registers(hw);
1231
1232         /* Get hw capabilities */
1233         ret = i40e_get_cap(hw);
1234         if (ret != I40E_SUCCESS) {
1235                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1236                 goto err_get_capabilities;
1237         }
1238
1239         /* Initialize parameters for PF */
1240         ret = i40e_pf_parameter_init(dev);
1241         if (ret != 0) {
1242                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1243                 goto err_parameter_init;
1244         }
1245
1246         /* Initialize the queue management */
1247         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1248         if (ret < 0) {
1249                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1250                 goto err_qp_pool_init;
1251         }
1252         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1253                                 hw->func_caps.num_msix_vectors - 1);
1254         if (ret < 0) {
1255                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1256                 goto err_msix_pool_init;
1257         }
1258
1259         /* Initialize lan hmc */
1260         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1261                                 hw->func_caps.num_rx_qp, 0, 0);
1262         if (ret != I40E_SUCCESS) {
1263                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1264                 goto err_init_lan_hmc;
1265         }
1266
1267         /* Configure lan hmc */
1268         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1269         if (ret != I40E_SUCCESS) {
1270                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1271                 goto err_configure_lan_hmc;
1272         }
1273
1274         /* Get and check the mac address */
1275         i40e_get_mac_addr(hw, hw->mac.addr);
1276         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1277                 PMD_INIT_LOG(ERR, "MAC address is not valid");
1278                 ret = -EIO;
1279                 goto err_get_mac_addr;
1280         }
1281         /* Copy the permanent MAC address */
1282         ether_addr_copy((struct ether_addr *) hw->mac.addr,
1283                         (struct ether_addr *) hw->mac.perm_addr);
1284
1285         /* Disable flow control */
1286         hw->fc.requested_mode = I40E_FC_NONE;
1287         i40e_set_fc(hw, &aq_fail, TRUE);
1288
1289         /* Set the global registers with default ether type value */
1290         if (!pf->support_multi_driver) {
1291                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1292                                          ETHER_TYPE_VLAN);
1293                 if (ret != I40E_SUCCESS) {
1294                         PMD_INIT_LOG(ERR,
1295                                      "Failed to set the default outer "
1296                                      "VLAN ether type");
1297                         goto err_setup_pf_switch;
1298                 }
1299         }
1300
1301         /* PF setup, which includes VSI setup */
1302         ret = i40e_pf_setup(pf);
1303         if (ret) {
1304                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1305                 goto err_setup_pf_switch;
1306         }
1307
1308         /* reset all stats of the device, including pf and main vsi */
1309         i40e_dev_stats_reset(dev);
1310
1311         vsi = pf->main_vsi;
1312
1313         /* Disable double vlan by default */
1314         i40e_vsi_config_double_vlan(vsi, FALSE);
1315
1316         /* Disable S-TAG identification when floating_veb is disabled */
1317         if (!pf->floating_veb) {
1318                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1319                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1320                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1321                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1322                 }
1323         }
1324
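        /*
         * Size the MAC address table to the VSI's capacity; fall back
         * to a single entry if the VSI reports no max_macaddrs.
         */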
1325         if (!vsi->max_macaddrs)
1326                 len = ETHER_ADDR_LEN;
1327         else
1328                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1329
1330         /* Should be after VSI initialized */
1331         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1332         if (!dev->data->mac_addrs) {
1333                 PMD_INIT_LOG(ERR,
1334                         "Failed to allocate memory for storing MAC addresses");
1335                 goto err_mac_alloc;
1336         }
1337         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1338                                         &dev->data->mac_addrs[0]);
1339
1340         /* Init dcb to sw mode by default */
1341         ret = i40e_dcb_init_configure(dev, TRUE);
1342         if (ret != I40E_SUCCESS) {
1343                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1344                 pf->flags &= ~I40E_FLAG_DCB;
1345         }
1346         /* Update HW struct after DCB configuration */
1347         i40e_get_cap(hw);
1348
1349         /* initialize pf host driver to setup SRIOV resource if applicable */
1350         i40e_pf_host_init(dev);
1351
1352         /* register callback func to eal lib */
1353         rte_intr_callback_register(intr_handle,
1354                                    i40e_dev_interrupt_handler, dev);
1355
1356         /* configure and enable device interrupt */
1357         i40e_pf_config_irq0(hw, TRUE);
1358         i40e_pf_enable_irq0(hw);
1359
1360         /* enable uio intr after callback register */
1361         rte_intr_enable(intr_handle);
1362
1363         /* Disable flexible payload in global configuration by default */
1364         if (!pf->support_multi_driver)
1365                 i40e_flex_payload_reg_set_default(hw);
1366
1367         /*
1368          * Add an ethertype filter to drop all flow control frames transmitted
1369          * from VSIs. By doing so, we stop VFs from sending PAUSE or PFC
1370          * frames to the wire.
1371          */
1372         i40e_add_tx_flow_control_drop_filter(pf);
1373
1374         /* Set the max frame size to 0x2600 by default,
1375          * in case other drivers changed the default value.
1376          */
1377         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1378
1379         /* initialize mirror rule list */
1380         TAILQ_INIT(&pf->mirror_list);
1381
1382         /* initialize Traffic Manager configuration */
1383         i40e_tm_conf_init(dev);
1384
1385         /* Initialize customized information */
1386         i40e_init_customized_info(pf);
1387
1388         ret = i40e_init_ethtype_filter_list(dev);
1389         if (ret < 0)
1390                 goto err_init_ethtype_filter_list;
1391         ret = i40e_init_tunnel_filter_list(dev);
1392         if (ret < 0)
1393                 goto err_init_tunnel_filter_list;
1394         ret = i40e_init_fdir_filter_list(dev);
1395         if (ret < 0)
1396                 goto err_init_fdir_filter_list;
1397
1398         /* initialize queue region configuration */
1399         i40e_init_queue_region_conf(dev);
1400
1401         /* initialize rss configuration from rte_flow */
1402         memset(&pf->rss_info, 0,
1403                 sizeof(struct i40e_rte_flow_rss_conf));
1404
1405         return 0;
1406
1407 err_init_fdir_filter_list:
1408         rte_free(pf->tunnel.hash_table);
1409         rte_free(pf->tunnel.hash_map);
1410 err_init_tunnel_filter_list:
1411         rte_free(pf->ethertype.hash_table);
1412         rte_free(pf->ethertype.hash_map);
1413 err_init_ethtype_filter_list:
1414         rte_free(dev->data->mac_addrs);
1415 err_mac_alloc:
1416         i40e_vsi_release(pf->main_vsi);
1417 err_setup_pf_switch:
1418 err_get_mac_addr:
1419 err_configure_lan_hmc:
1420         (void)i40e_shutdown_lan_hmc(hw);
1421 err_init_lan_hmc:
1422         i40e_res_pool_destroy(&pf->msix_pool);
1423 err_msix_pool_init:
1424         i40e_res_pool_destroy(&pf->qp_pool);
1425 err_qp_pool_init:
1426 err_parameter_init:
1427 err_get_capabilities:
1428         (void)i40e_shutdown_adminq(hw);
1429
1430         return ret;
1431 }
1432
1433 static void
1434 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1435 {
1436         struct i40e_ethertype_filter *p_ethertype;
1437         struct i40e_ethertype_rule *ethertype_rule;
1438
1439         ethertype_rule = &pf->ethertype;
1440         /* Remove all ethertype filter rules and free the hash map/table */
1441         if (ethertype_rule->hash_map)
1442                 rte_free(ethertype_rule->hash_map);
1443         if (ethertype_rule->hash_table)
1444                 rte_hash_free(ethertype_rule->hash_table);
1445
1446         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1447                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1448                              p_ethertype, rules);
1449                 rte_free(p_ethertype);
1450         }
1451 }
1452
1453 static void
1454 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1455 {
1456         struct i40e_tunnel_filter *p_tunnel;
1457         struct i40e_tunnel_rule *tunnel_rule;
1458
1459         tunnel_rule = &pf->tunnel;
1460         /* Remove all tunnel filter rules and free the hash map/table */
1461         if (tunnel_rule->hash_map)
1462                 rte_free(tunnel_rule->hash_map);
1463         if (tunnel_rule->hash_table)
1464                 rte_hash_free(tunnel_rule->hash_table);
1465
1466         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1467                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1468                 rte_free(p_tunnel);
1469         }
1470 }
1471
1472 static void
1473 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1474 {
1475         struct i40e_fdir_filter *p_fdir;
1476         struct i40e_fdir_info *fdir_info;
1477
1478         fdir_info = &pf->fdir;
1479         /* Remove all flow director rules and free the hash map/table */
1480         if (fdir_info->hash_map)
1481                 rte_free(fdir_info->hash_map);
1482         if (fdir_info->hash_table)
1483                 rte_hash_free(fdir_info->hash_table);
1484
1485         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1486                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1487                 rte_free(p_fdir);
1488         }
1489 }
1490
1491 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1492 {
1493         /*
1494          * Disable flexible payload by default
1495          * for the corresponding L2/L3/L4 layers.
1496          */
1497         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1498         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1499         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1500         i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD);
1501 }
1502
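/*
 * Tear down the PF in roughly the reverse order of eth_i40e_dev_init():
 * stop and close the port, restore default filter control and flow
 * control, release interrupt resources, and free the filter/flow lists.
 */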
1503 static int
1504 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1505 {
1506         struct i40e_pf *pf;
1507         struct rte_pci_device *pci_dev;
1508         struct rte_intr_handle *intr_handle;
1509         struct i40e_hw *hw;
1510         struct i40e_filter_control_settings settings;
1511         struct rte_flow *p_flow;
1512         int ret;
1513         uint8_t aq_fail = 0;
1514
1515         PMD_INIT_FUNC_TRACE();
1516
1517         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1518                 return 0;
1519
1520         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1521         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1522         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1523         intr_handle = &pci_dev->intr_handle;
1524
1525         if (hw->adapter_stopped == 0)
1526                 i40e_dev_close(dev);
1527
1528         dev->dev_ops = NULL;
1529         dev->rx_pkt_burst = NULL;
1530         dev->tx_pkt_burst = NULL;
1531
1532         /* Clear PXE mode */
1533         i40e_clear_pxe_mode(hw);
1534
1535         /* Unconfigure filter control */
1536         memset(&settings, 0, sizeof(settings));
1537         ret = i40e_set_filter_control(hw, &settings);
1538         if (ret)
1539                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1540                                         ret);
1541
1542         /* Disable flow control */
1543         hw->fc.requested_mode = I40E_FC_NONE;
1544         i40e_set_fc(hw, &aq_fail, TRUE);
1545
1546         /* uninitialize pf host driver */
1547         i40e_pf_host_uninit(dev);
1548
1549         rte_free(dev->data->mac_addrs);
1550         dev->data->mac_addrs = NULL;
1551
1552         /* disable uio intr before callback unregister */
1553         rte_intr_disable(intr_handle);
1554
1555         /* unregister callback func from eal lib */
1556         rte_intr_callback_unregister(intr_handle,
1557                                      i40e_dev_interrupt_handler, dev);
1558
1559         i40e_rm_ethtype_filter_list(pf);
1560         i40e_rm_tunnel_filter_list(pf);
1561         i40e_rm_fdir_filter_list(pf);
1562
1563         /* Remove all flows */
1564         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
1565                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
1566                 rte_free(p_flow);
1567         }
1568
1569         /* Remove all Traffic Manager configuration */
1570         i40e_tm_conf_uninit(dev);
1571
1572         return 0;
1573 }
1574
1575 static int
1576 i40e_dev_configure(struct rte_eth_dev *dev)
1577 {
1578         struct i40e_adapter *ad =
1579                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1580         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1581         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1582         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1583         int i, ret;
1584
1585         ret = i40e_dev_sync_phy_type(hw);
1586         if (ret)
1587                 return ret;
1588
1589         /* Initialize to TRUE. If any Rx queue doesn't meet the bulk
1590          * allocation or vector Rx preconditions, we will reset it.
1591          */
1592         ad->rx_bulk_alloc_allowed = true;
1593         ad->rx_vec_allowed = true;
1594         ad->tx_simple_allowed = true;
1595         ad->tx_vec_allowed = true;
1596
1597         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1598                 ret = i40e_fdir_setup(pf);
1599                 if (ret != I40E_SUCCESS) {
1600                         PMD_DRV_LOG(ERR, "Failed to set up flow director.");
1601                         return -ENOTSUP;
1602                 }
1603                 ret = i40e_fdir_configure(dev);
1604                 if (ret < 0) {
1605                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1606                         goto err;
1607                 }
1608         } else
1609                 i40e_fdir_teardown(pf);
1610
1611         ret = i40e_dev_init_vlan(dev);
1612         if (ret < 0)
1613                 goto err;
1614
1615         /* VMDQ setup.
1616          *  VMDQ setting needs to move out of i40e_pf_config_mq_rx(), as
1617          *  VMDQ and RSS have different configuration requirements.
1618          *  The general PMD call sequence is NIC init, configure,
1619          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up
1620          *  the VSI a specific queue belongs to when VMDQ is applicable,
1621          *  so the VMDQ setting has to be done before rx/tx_queue_setup();
1622          *  this function is a good place for vmdq_setup.
1623          *  RSS setup needs the actual number of configured RX queues,
1624          *  which is only available after rx_queue_setup(), so dev_start()
1625          *  is a good place for the RSS setup.
1626          */
1627         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1628                 ret = i40e_vmdq_setup(dev);
1629                 if (ret)
1630                         goto err;
1631         }
1632
1633         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1634                 ret = i40e_dcb_setup(dev);
1635                 if (ret) {
1636                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1637                         goto err_dcb;
1638                 }
1639         }
1640
1641         TAILQ_INIT(&pf->flow_list);
1642
1643         return 0;
1644
1645 err_dcb:
1646         /* need to release VMDQ resources if they exist */
1647         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1648                 i40e_vsi_release(pf->vmdq[i].vsi);
1649                 pf->vmdq[i].vsi = NULL;
1650         }
1651         rte_free(pf->vmdq);
1652         pf->vmdq = NULL;
1653 err:
1654         /* need to release fdir resources if they exist */
1655         i40e_fdir_teardown(pf);
1656         return ret;
1657 }
1658
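/*
 * Unbind the VSI's queues from their MSI-X vectors: zero each queue's
 * TX/RX interrupt-cause registers, then terminate the vector's queue
 * linked list (the PF, VF and shared-vector cases use different
 * link-list registers).
 */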
1659 void
1660 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1661 {
1662         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1663         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1664         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1665         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1666         uint16_t msix_vect = vsi->msix_intr;
1667         uint16_t i;
1668
1669         for (i = 0; i < vsi->nb_qps; i++) {
1670                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1671                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1672                 rte_wmb();
1673         }
1674
1675         if (vsi->type != I40E_VSI_SRIOV) {
1676                 if (!rte_intr_allow_others(intr_handle)) {
1677                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1678                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1679                         I40E_WRITE_REG(hw,
1680                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1681                                        0);
1682                 } else {
1683                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1684                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1685                         I40E_WRITE_REG(hw,
1686                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1687                                                        msix_vect - 1), 0);
1688                 }
1689         } else {
1690                 uint32_t reg;
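                /*
                 * VPINT_LNKLSTN is indexed by a flat vector space: each
                 * VF owns (num_msix_vectors_vf - 1) data-path vectors,
                 * since vector 0 is the misc vector. E.g. with 5 vectors
                 * per VF, VF 2 using vector 3 lands at 4 * 2 + 2 = 10.
                 */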
1691                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1692                         vsi->user_param + (msix_vect - 1);
1693
1694                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1695                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1696         }
1697         I40E_WRITE_FLUSH(hw);
1698 }
1699
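/*
 * Bind nb_queue RX queues starting at base_queue to one MSI-X vector:
 * each QINT_RQCTL entry points at the next queue, forming a linked
 * list whose head is written to the vector's LNKLST register (PF or
 * VF variant), along with the default ITR interval.
 */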
1700 static void
1701 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1702                        int base_queue, int nb_queue,
1703                        uint16_t itr_idx)
1704 {
1705         int i;
1706         uint32_t val;
1707         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1708
1709         /* Bind all RX queues to allocated MSIX interrupt */
1710         for (i = 0; i < nb_queue; i++) {
1711                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1712                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
1713                         ((base_queue + i + 1) <<
1714                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1715                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1716                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1717
1718                 if (i == nb_queue - 1)
1719                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1720                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1721         }
1722
1723         /* Write first RX queue to Link list register as the head element */
1724         if (vsi->type != I40E_VSI_SRIOV) {
1725                 uint16_t interval =
1726                         i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1);
1727
1728                 if (msix_vect == I40E_MISC_VEC_ID) {
1729                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1730                                        (base_queue <<
1731                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1732                                        (0x0 <<
1733                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1734                         I40E_WRITE_REG(hw,
1735                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1736                                        interval);
1737                 } else {
1738                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1739                                        (base_queue <<
1740                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1741                                        (0x0 <<
1742                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1743                         I40E_WRITE_REG(hw,
1744                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1745                                                        msix_vect - 1),
1746                                        interval);
1747                 }
1748         } else {
1749                 uint32_t reg;
1750
1751                 if (msix_vect == I40E_MISC_VEC_ID) {
1752                         I40E_WRITE_REG(hw,
1753                                        I40E_VPINT_LNKLST0(vsi->user_param),
1754                                        (base_queue <<
1755                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1756                                        (0x0 <<
1757                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1758                 } else {
1759                         /* num_msix_vectors_vf needs to exclude irq0 */
1760                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1761                                 vsi->user_param + (msix_vect - 1);
1762
1763                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1764                                        (base_queue <<
1765                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1766                                        (0x0 <<
1767                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1768                 }
1769         }
1770
1771         I40E_WRITE_FLUSH(hw);
1772 }
1773
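/*
 * Map the VSI's queues onto its MSI-X vectors: 1:1 when enough vectors
 * are available, otherwise the remaining queues all share one vector
 * (possibly the misc vector). The mapping is recorded in intr_handle
 * for the PF and VMDq cases.
 */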
1774 void
1775 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
1776 {
1777         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1778         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1779         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1780         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1781         uint16_t msix_vect = vsi->msix_intr;
1782         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1783         uint16_t queue_idx = 0;
1784         int record = 0;
1785         uint32_t val;
1786         int i;
1787
1788         for (i = 0; i < vsi->nb_qps; i++) {
1789                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1790                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1791         }
1792
1793         /* Disable auto-masking so the INTENA flag is not auto-cleared */
1794         val = I40E_READ_REG(hw, I40E_GLINT_CTL);
1795         val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
1796                 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
1797                 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
1798         I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
1799
1800         /* VF bind interrupt */
1801         if (vsi->type == I40E_VSI_SRIOV) {
1802                 __vsi_queues_bind_intr(vsi, msix_vect,
1803                                        vsi->base_queue, vsi->nb_qps,
1804                                        itr_idx);
1805                 return;
1806         }
1807
1808         /* PF & VMDq bind interrupt */
1809         if (rte_intr_dp_is_en(intr_handle)) {
1810                 if (vsi->type == I40E_VSI_MAIN) {
1811                         queue_idx = 0;
1812                         record = 1;
1813                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1814                         struct i40e_vsi *main_vsi =
1815                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1816                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1817                         record = 1;
1818                 }
1819         }
1820
1821         for (i = 0; i < vsi->nb_used_qps; i++) {
1822                 if (nb_msix <= 1) {
1823                         if (!rte_intr_allow_others(intr_handle))
1824                                 /* allow sharing MISC_VEC_ID */
1825                                 msix_vect = I40E_MISC_VEC_ID;
1826
1827                         /* not enough msix_vect, map all queues to one */
1828                         __vsi_queues_bind_intr(vsi, msix_vect,
1829                                                vsi->base_queue + i,
1830                                                vsi->nb_used_qps - i,
1831                                                itr_idx);
1832                         for (; !!record && i < vsi->nb_used_qps; i++)
1833                                 intr_handle->intr_vec[queue_idx + i] =
1834                                         msix_vect;
1835                         break;
1836                 }
1837                 /* 1:1 queue/msix_vect mapping */
1838                 __vsi_queues_bind_intr(vsi, msix_vect,
1839                                        vsi->base_queue + i, 1,
1840                                        itr_idx);
1841                 if (!!record)
1842                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1843
1844                 msix_vect++;
1845                 nb_msix--;
1846         }
1847 }
1848
1849 static void
1850 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1851 {
1852         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1853         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1854         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1855         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1856         uint16_t interval = i40e_calc_itr_interval(
1857                 RTE_LIBRTE_I40E_ITR_INTERVAL, 1);
1858         uint16_t msix_intr, i;
1859
1860         if (rte_intr_allow_others(intr_handle))
1861                 for (i = 0; i < vsi->nb_msix; i++) {
1862                         msix_intr = vsi->msix_intr + i;
1863                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1864                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1865                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1866                                 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1867                                 (interval <<
1868                                  I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
1869                 }
1870         else
1871                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1872                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1873                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1874                                (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
1875                                (interval <<
1876                                 I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
1877
1878         I40E_WRITE_FLUSH(hw);
1879 }
1880
1881 static void
1882 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1883 {
1884         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1885         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1886         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1887         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1888         uint16_t msix_intr, i;
1889
1890         if (rte_intr_allow_others(intr_handle))
1891                 for (i = 0; i < vsi->nb_msix; i++) {
1892                         msix_intr = vsi->msix_intr + i;
1893                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1894                                        0);
1895                 }
1896         else
1897                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
1898
1899         I40E_WRITE_FLUSH(hw);
1900 }
1901
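/*
 * Translate an rte_eth ETH_LINK_SPEED_* bitmap into the corresponding
 * I40E_LINK_SPEED_* bitmap understood by the admin queue.
 */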
1902 static inline uint8_t
1903 i40e_parse_link_speeds(uint16_t link_speeds)
1904 {
1905         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1906
1907         if (link_speeds & ETH_LINK_SPEED_40G)
1908                 link_speed |= I40E_LINK_SPEED_40GB;
1909         if (link_speeds & ETH_LINK_SPEED_25G)
1910                 link_speed |= I40E_LINK_SPEED_25GB;
1911         if (link_speeds & ETH_LINK_SPEED_20G)
1912                 link_speed |= I40E_LINK_SPEED_20GB;
1913         if (link_speeds & ETH_LINK_SPEED_10G)
1914                 link_speed |= I40E_LINK_SPEED_10GB;
1915         if (link_speeds & ETH_LINK_SPEED_1G)
1916                 link_speed |= I40E_LINK_SPEED_1GB;
1917         if (link_speeds & ETH_LINK_SPEED_100M)
1918                 link_speed |= I40E_LINK_SPEED_100MB;
1919
1920         return link_speed;
1921 }
1922
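/*
 * Program the PHY through the admin queue: keep the current pause and
 * low-power flags, advertise all speeds when autonegotiation is
 * enabled, or force the requested speed otherwise. Bringing the link
 * down clears the phy_type mask.
 */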
1923 static int
1924 i40e_phy_conf_link(struct i40e_hw *hw,
1925                    uint8_t abilities,
1926                    uint8_t force_speed,
1927                    bool is_up)
1928 {
1929         enum i40e_status_code status;
1930         struct i40e_aq_get_phy_abilities_resp phy_ab;
1931         struct i40e_aq_set_phy_config phy_conf;
1932         enum i40e_aq_phy_type cnt;
1933         uint32_t phy_type_mask = 0;
1934
1935         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
1936                         I40E_AQ_PHY_FLAG_PAUSE_RX |
1938                         I40E_AQ_PHY_FLAG_LOW_POWER;
1939         const uint8_t advt = I40E_LINK_SPEED_40GB |
1940                         I40E_LINK_SPEED_25GB |
1941                         I40E_LINK_SPEED_10GB |
1942                         I40E_LINK_SPEED_1GB |
1943                         I40E_LINK_SPEED_100MB;
1944         int ret = -ENOTSUP;
1945
1947         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
1948                                               NULL);
1949         if (status)
1950                 return ret;
1951
1952         /* If link is already up, no need to set it up again */
1953         if (is_up && phy_ab.phy_type != 0)
1954                 return I40E_SUCCESS;
1955
1956         memset(&phy_conf, 0, sizeof(phy_conf));
1957
1958         /* bits 0-2 use the values from get_phy_abilities_resp */
1959         abilities &= ~mask;
1960         abilities |= phy_ab.abilities & mask;
1961
1962         /* update abilities and speed */
1963         if (abilities & I40E_AQ_PHY_AN_ENABLED)
1964                 phy_conf.link_speed = advt;
1965         else
1966                 phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
1967
1968         phy_conf.abilities = abilities;
1969
1972         /* To enable link, phy_type mask needs to include each type */
1973         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
1974                 phy_type_mask |= 1 << cnt;
1975
1976         /* use get_phy_abilities_resp value for the rest */
1977         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
1978         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
1979                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
1980                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
1981         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
1982         phy_conf.eee_capability = phy_ab.eee_capability;
1983         phy_conf.eeer = phy_ab.eeer_val;
1984         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
1985
1986         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
1987                     phy_ab.abilities, phy_ab.link_speed);
1988         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
1989                     phy_conf.abilities, phy_conf.link_speed);
1990
1991         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
1992         if (status)
1993                 return ret;
1994
1995         return I40E_SUCCESS;
1996 }
1997
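/*
 * Build the PHY abilities and speed mask from dev_conf.link_speeds and
 * apply them; autonegotiation is enabled unless ETH_LINK_SPEED_FIXED
 * is set.
 */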
1998 static int
1999 i40e_apply_link_speed(struct rte_eth_dev *dev)
2000 {
2001         uint8_t speed;
2002         uint8_t abilities = 0;
2003         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2004         struct rte_eth_conf *conf = &dev->data->dev_conf;
2005
2006         speed = i40e_parse_link_speeds(conf->link_speeds);
2007         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2008         if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
2009                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2010         abilities |= I40E_AQ_PHY_LINK_ENABLED;
2011
2012         return i40e_phy_conf_link(hw, abilities, speed, true);
2013 }
2014
2015 static int
2016 i40e_dev_start(struct rte_eth_dev *dev)
2017 {
2018         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2019         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2020         struct i40e_vsi *main_vsi = pf->main_vsi;
2021         int ret, i;
2022         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2023         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2024         uint32_t intr_vector = 0;
2025         struct i40e_vsi *vsi;
2026
2027         hw->adapter_stopped = 0;
2028
2029         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2030                 PMD_INIT_LOG(ERR,
2031                 "Invalid link_speeds for port %u, autonegotiation disabled",
2032                               dev->data->port_id);
2033                 return -EINVAL;
2034         }
2035
2036         rte_intr_disable(intr_handle);
2037
2038         if ((rte_intr_cap_multiple(intr_handle) ||
2039              !RTE_ETH_DEV_SRIOV(dev).active) &&
2040             dev->data->dev_conf.intr_conf.rxq != 0) {
2041                 intr_vector = dev->data->nb_rx_queues;
2042                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2043                 if (ret)
2044                         return ret;
2045         }
2046
2047         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2048                 intr_handle->intr_vec =
2049                         rte_zmalloc("intr_vec",
2050                                     dev->data->nb_rx_queues * sizeof(int),
2051                                     0);
2052                 if (!intr_handle->intr_vec) {
2053                         PMD_INIT_LOG(ERR,
2054                                 "Failed to allocate %d rx_queues intr_vec",
2055                                 dev->data->nb_rx_queues);
2056                         return -ENOMEM;
2057                 }
2058         }
2059
2060         /* Initialize VSI */
2061         ret = i40e_dev_rxtx_init(pf);
2062         if (ret != I40E_SUCCESS) {
2063                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2064                 goto err_up;
2065         }
2066
2067         /* Map queues with MSIX interrupt */
2068         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2069                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2070         i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2071         i40e_vsi_enable_queues_intr(main_vsi);
2072
2073         /* Map VMDQ VSI queues with MSIX interrupt */
2074         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2075                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2076                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2077                                           I40E_ITR_INDEX_DEFAULT);
2078                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2079         }
2080
2081         /* enable FDIR MSIX interrupt */
2082         if (pf->fdir.fdir_vsi) {
2083                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
2084                                           I40E_ITR_INDEX_NONE);
2085                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
2086         }
2087
2088         /* Enable all queues which have been configured */
2089         ret = i40e_dev_switch_queues(pf, TRUE);
2090
2091         if (ret != I40E_SUCCESS) {
2092                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
2093                 goto err_up;
2094         }
2095
2096         /* Enable receiving broadcast packets */
2097         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2098         if (ret != I40E_SUCCESS)
2099                 PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
2100
2101         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2102                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2103                                                 true, NULL);
2104                 if (ret != I40E_SUCCESS)
2105                         PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
2106         }
2107
2108         /* Enable the VLAN promiscuous mode. */
2109         if (pf->vfs) {
2110                 for (i = 0; i < pf->vf_num; i++) {
2111                         vsi = pf->vfs[i].vsi;
2112                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2113                                                      true, NULL);
2114                 }
2115         }
2116
2117         /* Enable mac loopback mode */
2118         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2119             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2120                 ret = i40e_diag_set_loopback(hw, dev->data->dev_conf.lpbk_mode);
2121                 if (ret != I40E_SUCCESS) {
2122                         PMD_DRV_LOG(ERR, "failed to set loopback link");
2123                         goto err_up;
2124                 }
2125         }
2126
2127         /* Apply link configure */
2128         if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
2129                                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
2130                                 ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
2131                                 ETH_LINK_SPEED_40G)) {
2132                 PMD_DRV_LOG(ERR, "Invalid link setting");
2133                 goto err_up;
2134         }
2135         ret = i40e_apply_link_speed(dev);
2136         if (I40E_SUCCESS != ret) {
2137                 PMD_DRV_LOG(ERR, "Failed to apply link setting");
2138                 goto err_up;
2139         }
2140
2141         if (!rte_intr_allow_others(intr_handle)) {
2142                 rte_intr_callback_unregister(intr_handle,
2143                                              i40e_dev_interrupt_handler,
2144                                              (void *)dev);
2145                 /* configure and enable device interrupt */
2146                 i40e_pf_config_irq0(hw, FALSE);
2147                 i40e_pf_enable_irq0(hw);
2148
2149                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2150                         PMD_INIT_LOG(INFO,
2151                                 "lsc won't be enabled because of no intr multiplex");
2152         } else {
2153                 ret = i40e_aq_set_phy_int_mask(hw,
2154                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2155                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2156                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2157                 if (ret != I40E_SUCCESS)
2158                         PMD_DRV_LOG(WARNING, "Failed to set phy mask");
2159
2160                 /* Call get_link_info aq command to enable/disable LSE */
2161                 i40e_dev_link_update(dev, 0);
2162         }
2163
2164         /* enable uio intr after callback register */
2165         rte_intr_enable(intr_handle);
2166
2167         i40e_filter_restore(pf);
2168
2169         if (pf->tm_conf.root && !pf->tm_conf.committed)
2170                 PMD_DRV_LOG(WARNING,
2171                             "please call hierarchy_commit() "
2172                             "before starting the port");
2173
2174         return I40E_SUCCESS;
2175
2176 err_up:
2177         i40e_dev_switch_queues(pf, FALSE);
2178         i40e_dev_clear_queues(dev);
2179
2180         return ret;
2181 }
2182
2183 static void
2184 i40e_dev_stop(struct rte_eth_dev *dev)
2185 {
2186         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2187         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2188         struct i40e_vsi *main_vsi = pf->main_vsi;
2189         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2190         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2191         int i;
2192
2193         if (hw->adapter_stopped == 1)
2194                 return;
2195         /* Disable all queues */
2196         i40e_dev_switch_queues(pf, FALSE);
2197
2198         /* un-map queues with interrupt registers */
2199         i40e_vsi_disable_queues_intr(main_vsi);
2200         i40e_vsi_queues_unbind_intr(main_vsi);
2201
2202         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2203                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2204                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2205         }
2206
2207         if (pf->fdir.fdir_vsi) {
2208                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2209                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2210         }
2211         /* Clear all queues and release memory */
2212         i40e_dev_clear_queues(dev);
2213
2214         /* Set link down */
2215         i40e_dev_set_link_down(dev);
2216
2217         if (!rte_intr_allow_others(intr_handle))
2218                 /* restore the default interrupt handler */
2219                 rte_intr_callback_register(intr_handle,
2220                                            i40e_dev_interrupt_handler,
2221                                            (void *)dev);
2222
2223         /* Clean datapath event and queue/vec mapping */
2224         rte_intr_efd_disable(intr_handle);
2225         if (intr_handle->intr_vec) {
2226                 rte_free(intr_handle->intr_vec);
2227                 intr_handle->intr_vec = NULL;
2228         }
2229
2230         /* reset hierarchy commit */
2231         pf->tm_conf.committed = false;
2232
2233         hw->adapter_stopped = 1;
2234 }
2235
2236 static void
2237 i40e_dev_close(struct rte_eth_dev *dev)
2238 {
2239         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2240         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2241         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2242         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2243         struct i40e_mirror_rule *p_mirror;
2244         uint32_t reg;
2245         int i;
2246         int ret;
2247
2248         PMD_INIT_FUNC_TRACE();
2249
2250         i40e_dev_stop(dev);
2251
2252         /* Remove all mirror rules */
2253         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2254                 ret = i40e_aq_del_mirror_rule(hw,
2255                                               pf->main_vsi->veb->seid,
2256                                               p_mirror->rule_type,
2257                                               p_mirror->entries,
2258                                               p_mirror->num_entries,
2259                                               p_mirror->id);
2260                 if (ret < 0)
2261                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2262                                     "status = %d, aq_err = %d.", ret,
2263                                     hw->aq.asq_last_status);
2264
2265                 /* remove mirror software resource anyway */
2266                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2267                 rte_free(p_mirror);
2268                 pf->nb_mirror_rule--;
2269         }
2270
2271         i40e_dev_free_queues(dev);
2272
2273         /* Disable interrupt */
2274         i40e_pf_disable_irq0(hw);
2275         rte_intr_disable(intr_handle);
2276
2277         /* shutdown and destroy the HMC */
2278         i40e_shutdown_lan_hmc(hw);
2279
2280         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2281                 i40e_vsi_release(pf->vmdq[i].vsi);
2282                 pf->vmdq[i].vsi = NULL;
2283         }
2284         rte_free(pf->vmdq);
2285         pf->vmdq = NULL;
2286
2287         /* release all the existing VSIs and VEBs */
2288         i40e_fdir_teardown(pf);
2289         i40e_vsi_release(pf->main_vsi);
2290
2291         /* shutdown the adminq */
2292         i40e_aq_queue_shutdown(hw, true);
2293         i40e_shutdown_adminq(hw);
2294
2295         i40e_res_pool_destroy(&pf->qp_pool);
2296         i40e_res_pool_destroy(&pf->msix_pool);
2297
2298         /* Disable flexible payload in global configuration */
2299         if (!pf->support_multi_driver)
2300                 i40e_flex_payload_reg_set_default(hw);
2301
2302         /* force a PF reset to clean anything leftover */
2303         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2304         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2305                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2306         I40E_WRITE_FLUSH(hw);
2307 }
2308
2309 /*
2310  * Reset PF device only to re-initialize resources in PMD layer
2311  */
2312 static int
2313 i40e_dev_reset(struct rte_eth_dev *dev)
2314 {
2315         int ret;
2316
2317         /* When a DPDK PMD PF begins to reset the PF port, it should notify
2318          * all of its VFs so they stay aligned with it. The notification
2319          * mechanism is PMD-specific and, for the i40e PF, rather complex.
2320          * To avoid unexpected behavior in the VFs, reset of a PF with
2321          * SR-IOV active is currently not supported. It might be supported later.
2322          */
2323         if (dev->data->sriov.active)
2324                 return -ENOTSUP;
2325
2326         ret = eth_i40e_dev_uninit(dev);
2327         if (ret)
2328                 return ret;
2329
2330         ret = eth_i40e_dev_init(dev);
2331
2332         return ret;
2333 }
2334
2335 static void
2336 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2337 {
2338         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2339         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2340         struct i40e_vsi *vsi = pf->main_vsi;
2341         int status;
2342
2343         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2344                                                      true, NULL, true);
2345         if (status != I40E_SUCCESS)
2346                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2347
2348         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2349                                                         TRUE, NULL);
2350         if (status != I40E_SUCCESS)
2351                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2352
2353 }
2354
2355 static void
2356 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2357 {
2358         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2359         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2360         struct i40e_vsi *vsi = pf->main_vsi;
2361         int status;
2362
2363         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2364                                                      false, NULL, true);
2365         if (status != I40E_SUCCESS)
2366                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2367
2368         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2369                                                         false, NULL);
2370         if (status != I40E_SUCCESS)
2371                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2372 }
2373
2374 static void
2375 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2376 {
2377         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2378         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2379         struct i40e_vsi *vsi = pf->main_vsi;
2380         int ret;
2381
2382         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2383         if (ret != I40E_SUCCESS)
2384                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2385 }
2386
2387 static void
2388 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2389 {
2390         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2391         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2392         struct i40e_vsi *vsi = pf->main_vsi;
2393         int ret;
2394
2395         if (dev->data->promiscuous == 1)
2396                 return; /* must remain in all_multicast mode */
2397
2398         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2399                                 vsi->seid, FALSE, NULL);
2400         if (ret != I40E_SUCCESS)
2401                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2402 }
2403
2404 /*
2405  * Set device link up.
2406  */
2407 static int
2408 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2409 {
2410         /* re-apply link speed setting */
2411         return i40e_apply_link_speed(dev);
2412 }
2413
2414 /*
2415  * Set device link down.
2416  */
2417 static int
2418 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2419 {
2420         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2421         uint8_t abilities = 0;
2422         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2423
2424         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2425         return i40e_phy_conf_link(hw, abilities, speed, false);
2426 }
2427
2428 int
2429 i40e_dev_link_update(struct rte_eth_dev *dev,
2430                      int wait_to_complete)
2431 {
2432 #define CHECK_INTERVAL 100  /* 100ms */
2433 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
2434         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2435         struct i40e_link_status link_status;
2436         struct rte_eth_link link, old;
2437         int status;
2438         unsigned rep_cnt = MAX_REPEAT_TIME;
2439         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2440
2441         memset(&link, 0, sizeof(link));
2442         memset(&old, 0, sizeof(old));
2443         memset(&link_status, 0, sizeof(link_status));
2444         rte_i40e_dev_atomic_read_link_status(dev, &old);
2445
2446         do {
2447                 /* Get link status information from hardware */
2448                 status = i40e_aq_get_link_info(hw, enable_lse,
2449                                                 &link_status, NULL);
2450                 if (status != I40E_SUCCESS) {
2451                         link.link_speed = ETH_SPEED_NUM_100M;
2452                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2453                         PMD_DRV_LOG(ERR, "Failed to get link info");
2454                         goto out;
2455                 }
2456
2457                 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
2458                 if (!wait_to_complete || link.link_status)
2459                         break;
2460
2461                 rte_delay_ms(CHECK_INTERVAL);
2462         } while (--rep_cnt);
2463
2464         if (!link.link_status)
2465                 goto out;
2466
2467         /* i40e uses full duplex only */
2468         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2469
2470         /* Parse the link status */
2471         switch (link_status.link_speed) {
2472         case I40E_LINK_SPEED_100MB:
2473                 link.link_speed = ETH_SPEED_NUM_100M;
2474                 break;
2475         case I40E_LINK_SPEED_1GB:
2476                 link.link_speed = ETH_SPEED_NUM_1G;
2477                 break;
2478         case I40E_LINK_SPEED_10GB:
2479                 link.link_speed = ETH_SPEED_NUM_10G;
2480                 break;
2481         case I40E_LINK_SPEED_20GB:
2482                 link.link_speed = ETH_SPEED_NUM_20G;
2483                 break;
2484         case I40E_LINK_SPEED_25GB:
2485                 link.link_speed = ETH_SPEED_NUM_25G;
2486                 break;
2487         case I40E_LINK_SPEED_40GB:
2488                 link.link_speed = ETH_SPEED_NUM_40G;
2489                 break;
2490         default:
2491                 link.link_speed = ETH_SPEED_NUM_100M;
2492                 break;
2493         }
2494
2495         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2496                         ETH_LINK_SPEED_FIXED);
2497
2498 out:
2499         rte_i40e_dev_atomic_write_link_status(dev, &link);
2500         if (link.link_status == old.link_status)
2501                 return -1;
2502
2503         i40e_notify_all_vfs_link_status(dev);
2504
2505         return 0;
2506 }
2507
2508 /* Get all the statistics of a VSI */
2509 void
2510 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2511 {
2512         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2513         struct i40e_eth_stats *nes = &vsi->eth_stats;
2514         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2515         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2516
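        /*
         * Each 48-bit counter is split across a high/low register pair;
         * the i40e_stat_update_48/_32 helpers accumulate the delta since
         * the stored offset, or just snapshot the offset on the first
         * read (offset_loaded == false).
         */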
2517         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2518                             vsi->offset_loaded, &oes->rx_bytes,
2519                             &nes->rx_bytes);
2520         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2521                             vsi->offset_loaded, &oes->rx_unicast,
2522                             &nes->rx_unicast);
2523         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2524                             vsi->offset_loaded, &oes->rx_multicast,
2525                             &nes->rx_multicast);
2526         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2527                             vsi->offset_loaded, &oes->rx_broadcast,
2528                             &nes->rx_broadcast);
2529         /* exclude CRC bytes */
2530         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2531                 nes->rx_broadcast) * ETHER_CRC_LEN;
2532
2533         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2534                             &oes->rx_discards, &nes->rx_discards);
2535         /* GLV_REPC not supported */
2536         /* GLV_RMPC not supported */
2537         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2538                             &oes->rx_unknown_protocol,
2539                             &nes->rx_unknown_protocol);
2540         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2541                             vsi->offset_loaded, &oes->tx_bytes,
2542                             &nes->tx_bytes);
2543         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2544                             vsi->offset_loaded, &oes->tx_unicast,
2545                             &nes->tx_unicast);
2546         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2547                             vsi->offset_loaded, &oes->tx_multicast,
2548                             &nes->tx_multicast);
2549         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2550                             vsi->offset_loaded,  &oes->tx_broadcast,
2551                             &nes->tx_broadcast);
2552         /* GLV_TDPC not supported */
2553         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2554                             &oes->tx_errors, &nes->tx_errors);
2555         vsi->offset_loaded = true;
2556
2557         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2558                     vsi->vsi_id);
2559         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2560         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2561         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2562         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2563         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2564         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2565                     nes->rx_unknown_protocol);
2566         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2567         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2568         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2569         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2570         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2571         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2572         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2573                     vsi->vsi_id);
2574 }
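
/*
 * Editor's note: a minimal sketch of the offset-based accumulation that a
 * helper such as i40e_stat_update_48() performs (the real helper is defined
 * earlier in this file; the names and details below are illustrative
 * assumptions, and the block is #if 0'd out so it is never built). The
 * hardware exposes a free-running 48-bit counter split across a low/high
 * register pair; the driver reports the delta since the last offset capture
 * and tolerates a single 48-bit wraparound.
 */
#if 0	/* illustrative sketch only */
static void
stat_update_48_sketch(uint64_t lo32, uint64_t hi16, int offset_loaded,
		      uint64_t *offset, uint64_t *stat)
{
	/* Combine the 32-bit low and 16-bit high halves into one counter */
	uint64_t new_data = (lo32 & 0xFFFFFFFFULL) | ((hi16 & 0xFFFFULL) << 32);

	if (!offset_loaded)		/* first read after reset: capture base */
		*offset = new_data;
	if (new_data >= *offset)	/* counter has not wrapped since offset */
		*stat = new_data - *offset;
	else				/* counter wrapped once past 2^48 */
		*stat = new_data + (1ULL << 48) - *offset;
	*stat &= (1ULL << 48) - 1;	/* keep the reported value within 48 bits */
}
#endif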
2575
2576 static void
2577 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2578 {
2579         unsigned int i;
2580         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2581         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2582
2583         /* Get rx/tx bytes of internal transfer packets */
2584         i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2585                         I40E_GLV_GORCL(hw->port),
2586                         pf->offset_loaded,
2587                         &pf->internal_stats_offset.rx_bytes,
2588                         &pf->internal_stats.rx_bytes);
2589
2590         i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2591                         I40E_GLV_GOTCL(hw->port),
2592                         pf->offset_loaded,
2593                         &pf->internal_stats_offset.tx_bytes,
2594                         &pf->internal_stats.tx_bytes);
2595         /* Get total internal rx packet count */
2596         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
2597                             I40E_GLV_UPRCL(hw->port),
2598                             pf->offset_loaded,
2599                             &pf->internal_stats_offset.rx_unicast,
2600                             &pf->internal_stats.rx_unicast);
2601         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
2602                             I40E_GLV_MPRCL(hw->port),
2603                             pf->offset_loaded,
2604                             &pf->internal_stats_offset.rx_multicast,
2605                             &pf->internal_stats.rx_multicast);
2606         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
2607                             I40E_GLV_BPRCL(hw->port),
2608                             pf->offset_loaded,
2609                             &pf->internal_stats_offset.rx_broadcast,
2610                             &pf->internal_stats.rx_broadcast);
2611         /* Get total internal tx packet count */
2612         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
2613                             I40E_GLV_UPTCL(hw->port),
2614                             pf->offset_loaded,
2615                             &pf->internal_stats_offset.tx_unicast,
2616                             &pf->internal_stats.tx_unicast);
2617         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
2618                             I40E_GLV_MPTCL(hw->port),
2619                             pf->offset_loaded,
2620                             &pf->internal_stats_offset.tx_multicast,
2621                             &pf->internal_stats.tx_multicast);
2622         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
2623                             I40E_GLV_BPTCL(hw->port),
2624                             pf->offset_loaded,
2625                             &pf->internal_stats_offset.tx_broadcast,
2626                             &pf->internal_stats.tx_broadcast);
2627
2628         /* exclude CRC size */
2629         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
2630                 pf->internal_stats.rx_multicast +
2631                 pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
2632
2633         /* Get statistics of struct i40e_eth_stats */
2634         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2635                             I40E_GLPRT_GORCL(hw->port),
2636                             pf->offset_loaded, &os->eth.rx_bytes,
2637                             &ns->eth.rx_bytes);
2638         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2639                             I40E_GLPRT_UPRCL(hw->port),
2640                             pf->offset_loaded, &os->eth.rx_unicast,
2641                             &ns->eth.rx_unicast);
2642         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2643                             I40E_GLPRT_MPRCL(hw->port),
2644                             pf->offset_loaded, &os->eth.rx_multicast,
2645                             &ns->eth.rx_multicast);
2646         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2647                             I40E_GLPRT_BPRCL(hw->port),
2648                             pf->offset_loaded, &os->eth.rx_broadcast,
2649                             &ns->eth.rx_broadcast);
2650         /* Workaround: CRC size should not be included in byte statistics,
2651          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2652          */
2653         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2654                 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2655
2656         /* Exclude internal rx bytes.
2657          * Workaround: I40E_GLV_GORC[H/L] may be updated before
2658          * I40E_GLPRT_GORC[H/L], so there is a small window that can
2659          * produce a negative value. The same applies to I40E_GLV_UPRC[H/L],
2660          * I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
2661          */
2662         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
2663                 ns->eth.rx_bytes = 0;
2664         else
2665                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
2666
2667         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
2668                 ns->eth.rx_unicast = 0;
2669         else
2670                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
2671
2672         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
2673                 ns->eth.rx_multicast = 0;
2674         else
2675                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
2676
2677         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
2678                 ns->eth.rx_broadcast = 0;
2679         else
2680                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
2681
2682         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2683                             pf->offset_loaded, &os->eth.rx_discards,
2684                             &ns->eth.rx_discards);
2685         /* GLPRT_REPC not supported */
2686         /* GLPRT_RMPC not supported */
2687         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2688                             pf->offset_loaded,
2689                             &os->eth.rx_unknown_protocol,
2690                             &ns->eth.rx_unknown_protocol);
2691         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2692                             I40E_GLPRT_GOTCL(hw->port),
2693                             pf->offset_loaded, &os->eth.tx_bytes,
2694                             &ns->eth.tx_bytes);
2695         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2696                             I40E_GLPRT_UPTCL(hw->port),
2697                             pf->offset_loaded, &os->eth.tx_unicast,
2698                             &ns->eth.tx_unicast);
2699         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2700                             I40E_GLPRT_MPTCL(hw->port),
2701                             pf->offset_loaded, &os->eth.tx_multicast,
2702                             &ns->eth.tx_multicast);
2703         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2704                             I40E_GLPRT_BPTCL(hw->port),
2705                             pf->offset_loaded, &os->eth.tx_broadcast,
2706                             &ns->eth.tx_broadcast);
2707         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2708                 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2709
2710         /* Exclude internal tx bytes.
2711          * Workaround: I40E_GLV_GOTC[H/L] may be updated before
2712          * I40E_GLPRT_GOTC[H/L], so there is a small window that can
2713          * produce a negative value. The same applies to I40E_GLV_UPTC[H/L],
2714          * I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
2715          */
2716         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
2717                 ns->eth.tx_bytes = 0;
2718         else
2719                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
2720
2721         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
2722                 ns->eth.tx_unicast = 0;
2723         else
2724                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
2725
2726         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
2727                 ns->eth.tx_multicast = 0;
2728         else
2729                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
2730
2731         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
2732                 ns->eth.tx_broadcast = 0;
2733         else
2734                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
2735
2736         /* GLPRT_TEPC not supported */
2737
2738         /* additional port specific stats */
2739         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2740                             pf->offset_loaded, &os->tx_dropped_link_down,
2741                             &ns->tx_dropped_link_down);
2742         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2743                             pf->offset_loaded, &os->crc_errors,
2744                             &ns->crc_errors);
2745         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2746                             pf->offset_loaded, &os->illegal_bytes,
2747                             &ns->illegal_bytes);
2748         /* GLPRT_ERRBC not supported */
2749         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2750                             pf->offset_loaded, &os->mac_local_faults,
2751                             &ns->mac_local_faults);
2752         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2753                             pf->offset_loaded, &os->mac_remote_faults,
2754                             &ns->mac_remote_faults);
2755         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2756                             pf->offset_loaded, &os->rx_length_errors,
2757                             &ns->rx_length_errors);
2758         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2759                             pf->offset_loaded, &os->link_xon_rx,
2760                             &ns->link_xon_rx);
2761         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2762                             pf->offset_loaded, &os->link_xoff_rx,
2763                             &ns->link_xoff_rx);
2764         for (i = 0; i < 8; i++) {
2765                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2766                                     pf->offset_loaded,
2767                                     &os->priority_xon_rx[i],
2768                                     &ns->priority_xon_rx[i]);
2769                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2770                                     pf->offset_loaded,
2771                                     &os->priority_xoff_rx[i],
2772                                     &ns->priority_xoff_rx[i]);
2773         }
2774         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2775                             pf->offset_loaded, &os->link_xon_tx,
2776                             &ns->link_xon_tx);
2777         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2778                             pf->offset_loaded, &os->link_xoff_tx,
2779                             &ns->link_xoff_tx);
2780         for (i = 0; i < 8; i++) {
2781                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2782                                     pf->offset_loaded,
2783                                     &os->priority_xon_tx[i],
2784                                     &ns->priority_xon_tx[i]);
2785                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2786                                     pf->offset_loaded,
2787                                     &os->priority_xoff_tx[i],
2788                                     &ns->priority_xoff_tx[i]);
2789                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2790                                     pf->offset_loaded,
2791                                     &os->priority_xon_2_xoff[i],
2792                                     &ns->priority_xon_2_xoff[i]);
2793         }
2794         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2795                             I40E_GLPRT_PRC64L(hw->port),
2796                             pf->offset_loaded, &os->rx_size_64,
2797                             &ns->rx_size_64);
2798         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2799                             I40E_GLPRT_PRC127L(hw->port),
2800                             pf->offset_loaded, &os->rx_size_127,
2801                             &ns->rx_size_127);
2802         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2803                             I40E_GLPRT_PRC255L(hw->port),
2804                             pf->offset_loaded, &os->rx_size_255,
2805                             &ns->rx_size_255);
2806         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2807                             I40E_GLPRT_PRC511L(hw->port),
2808                             pf->offset_loaded, &os->rx_size_511,
2809                             &ns->rx_size_511);
2810         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2811                             I40E_GLPRT_PRC1023L(hw->port),
2812                             pf->offset_loaded, &os->rx_size_1023,
2813                             &ns->rx_size_1023);
2814         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2815                             I40E_GLPRT_PRC1522L(hw->port),
2816                             pf->offset_loaded, &os->rx_size_1522,
2817                             &ns->rx_size_1522);
2818         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2819                             I40E_GLPRT_PRC9522L(hw->port),
2820                             pf->offset_loaded, &os->rx_size_big,
2821                             &ns->rx_size_big);
2822         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2823                             pf->offset_loaded, &os->rx_undersize,
2824                             &ns->rx_undersize);
2825         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2826                             pf->offset_loaded, &os->rx_fragments,
2827                             &ns->rx_fragments);
2828         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2829                             pf->offset_loaded, &os->rx_oversize,
2830                             &ns->rx_oversize);
2831         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2832                             pf->offset_loaded, &os->rx_jabber,
2833                             &ns->rx_jabber);
2834         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2835                             I40E_GLPRT_PTC64L(hw->port),
2836                             pf->offset_loaded, &os->tx_size_64,
2837                             &ns->tx_size_64);
2838         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2839                             I40E_GLPRT_PTC127L(hw->port),
2840                             pf->offset_loaded, &os->tx_size_127,
2841                             &ns->tx_size_127);
2842         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2843                             I40E_GLPRT_PTC255L(hw->port),
2844                             pf->offset_loaded, &os->tx_size_255,
2845                             &ns->tx_size_255);
2846         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2847                             I40E_GLPRT_PTC511L(hw->port),
2848                             pf->offset_loaded, &os->tx_size_511,
2849                             &ns->tx_size_511);
2850         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2851                             I40E_GLPRT_PTC1023L(hw->port),
2852                             pf->offset_loaded, &os->tx_size_1023,
2853                             &ns->tx_size_1023);
2854         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2855                             I40E_GLPRT_PTC1522L(hw->port),
2856                             pf->offset_loaded, &os->tx_size_1522,
2857                             &ns->tx_size_1522);
2858         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2859                             I40E_GLPRT_PTC9522L(hw->port),
2860                             pf->offset_loaded, &os->tx_size_big,
2861                             &ns->tx_size_big);
2862         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2863                            pf->offset_loaded,
2864                            &os->fd_sb_match, &ns->fd_sb_match);
2865         /* GLPRT_MSPDC not supported */
2866         /* GLPRT_XEC not supported */
2867
2868         pf->offset_loaded = true;
2869
2870         if (pf->main_vsi)
2871                 i40e_update_vsi_stats(pf->main_vsi);
2872 }
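
/*
 * Editor's note: the repeated clamp-to-zero subtractions above (port-level
 * counter minus internal-transfer counter) all follow the same pattern; the
 * hypothetical helper below is only a sketch of that pattern, making the
 * workaround's intent explicit: never let the register-ordering race turn an
 * unsigned subtraction into a huge wrapped value.
 */
#if 0	/* illustrative sketch only */
static inline uint64_t
sub_clamp_u64_sketch(uint64_t port_cnt, uint64_t internal_cnt)
{
	/* GLV_* may be momentarily ahead of GLPRT_*; clamp instead of wrap */
	return (port_cnt < internal_cnt) ? 0 : port_cnt - internal_cnt;
}
#endif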
2873
2874 /* Get all statistics of a port */
2875 static int
2876 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2877 {
2878         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2879         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2880         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2881         unsigned i;
2882
2883         /* Read the hardware registers to update the counters, then fill the struct */
2884         i40e_read_stats_registers(pf, hw);
2885
2886         stats->ipackets = ns->eth.rx_unicast +
2887                         ns->eth.rx_multicast +
2888                         ns->eth.rx_broadcast -
2889                         ns->eth.rx_discards -
2890                         pf->main_vsi->eth_stats.rx_discards;
2891         stats->opackets = ns->eth.tx_unicast +
2892                         ns->eth.tx_multicast +
2893                         ns->eth.tx_broadcast;
2894         stats->ibytes   = ns->eth.rx_bytes;
2895         stats->obytes   = ns->eth.tx_bytes;
2896         stats->oerrors  = ns->eth.tx_errors +
2897                         pf->main_vsi->eth_stats.tx_errors;
2898
2899         /* Rx Errors */
2900         stats->imissed  = ns->eth.rx_discards +
2901                         pf->main_vsi->eth_stats.rx_discards;
2902         stats->ierrors  = ns->crc_errors +
2903                         ns->rx_length_errors + ns->rx_undersize +
2904                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
2905
2906         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2907         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
2908         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
2909         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
2910         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
2911         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
2912         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2913                     ns->eth.rx_unknown_protocol);
2914         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
2915         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
2916         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
2917         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
2918         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
2919         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
2920
2921         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
2922                     ns->tx_dropped_link_down);
2923         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
2924         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
2925                     ns->illegal_bytes);
2926         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
2927         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
2928                     ns->mac_local_faults);
2929         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
2930                     ns->mac_remote_faults);
2931         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
2932                     ns->rx_length_errors);
2933         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
2934         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
2935         for (i = 0; i < 8; i++) {
2936                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
2937                                 i, ns->priority_xon_rx[i]);
2938                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
2939                                 i, ns->priority_xoff_rx[i]);
2940         }
2941         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
2942         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
2943         for (i = 0; i < 8; i++) {
2944                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
2945                                 i, ns->priority_xon_tx[i]);
2946                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
2947                                 i, ns->priority_xoff_tx[i]);
2948                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
2949                                 i, ns->priority_xon_2_xoff[i]);
2950         }
2951         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
2952         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
2953         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
2954         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
2955         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
2956         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
2957         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
2958         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
2959         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
2960         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
2961         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
2962         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
2963         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
2964         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
2965         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
2966         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
2967         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
2968         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
2969         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
2970                         ns->mac_short_packet_dropped);
2971         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
2972                     ns->checksum_error);
2973         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
2974         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
2975         return 0;
2976 }
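
/*
 * Editor's note: a hedged sketch of how an application reaches the callback
 * above; rte_eth_stats_get() dispatches to i40e_dev_stats_get() through the
 * eth_dev_ops table. The port id is an assumption for illustration; the
 * headers this file already includes (stdio.h, inttypes.h, the ethdev
 * headers) cover the calls used.
 */
#if 0	/* illustrative sketch only */
static void
print_basic_stats_sketch(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) == 0)
		printf("port %u: ipackets=%"PRIu64" opackets=%"PRIu64
		       " ierrors=%"PRIu64" imissed=%"PRIu64"\n",
		       port_id, stats.ipackets, stats.opackets,
		       stats.ierrors, stats.imissed);
}
#endif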
2977
2978 /* Reset the statistics */
2979 static void
2980 i40e_dev_stats_reset(struct rte_eth_dev *dev)
2981 {
2982         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2983         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2984
2985         /* Mark PF and VSI stats to update the offset, aka "reset" */
2986         pf->offset_loaded = false;
2987         if (pf->main_vsi)
2988                 pf->main_vsi->offset_loaded = false;
2989
2990         /* Read the stats; this captures the current register values as the new offsets */
2991         i40e_read_stats_registers(pf, hw);
2992 }
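
/*
 * Editor's note: a tiny worked example (values assumed) of why re-capturing
 * the offset acts as a reset. Reported values are always raw - offset, so
 * re-reading with offset_loaded == false restarts the deltas from zero
 * without writing to the read-only hardware counters.
 */
#if 0	/* illustrative sketch only */
static void
offset_reset_demo_sketch(void)
{
	uint64_t raw = 1000, offset = 0, reported;

	reported = raw - offset;	/* 1000 counted since last reset */
	offset = raw;			/* "reset": capture the current raw value */
	raw += 25;			/* hardware keeps counting */
	reported = raw - offset;	/* 25 counted since the reset */
	(void)reported;
}
#endif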
2993
2994 static uint32_t
2995 i40e_xstats_calc_num(void)
2996 {
2997         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
2998                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
2999                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3000 }
3001
3002 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3003                                      struct rte_eth_xstat_name *xstats_names,
3004                                      __rte_unused unsigned limit)
3005 {
3006         unsigned count = 0;
3007         unsigned i, prio;
3008
3009         if (xstats_names == NULL)
3010                 return i40e_xstats_calc_num();
3011
3012         /* Note: limit checked in rte_eth_xstats_get_names() */
3013
3014         /* Get stats from i40e_eth_stats struct */
3015         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3016                 snprintf(xstats_names[count].name,
3017                          sizeof(xstats_names[count].name),
3018                          "%s", rte_i40e_stats_strings[i].name);
3019                 count++;
3020         }
3021
3022         /* Get individual stats from i40e_hw_port struct */
3023         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3024                 snprintf(xstats_names[count].name,
3025                         sizeof(xstats_names[count].name),
3026                          "%s", rte_i40e_hw_port_strings[i].name);
3027                 count++;
3028         }
3029
3030         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3031                 for (prio = 0; prio < 8; prio++) {
3032                         snprintf(xstats_names[count].name,
3033                                  sizeof(xstats_names[count].name),
3034                                  "rx_priority%u_%s", prio,
3035                                  rte_i40e_rxq_prio_strings[i].name);
3036                         count++;
3037                 }
3038         }
3039
3040         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3041                 for (prio = 0; prio < 8; prio++) {
3042                         snprintf(xstats_names[count].name,
3043                                  sizeof(xstats_names[count].name),
3044                                  "tx_priority%u_%s", prio,
3045                                  rte_i40e_txq_prio_strings[i].name);
3046                         count++;
3047                 }
3048         }
3049         return count;
3050 }
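
/*
 * Editor's note: the NULL-names convention above (return the total count)
 * enables the usual two-pass query from an application; a hedged sketch
 * follows (port id assumed; rte_malloc.h is already included by this file).
 */
#if 0	/* illustrative sketch only */
static void
list_xstat_names_sketch(uint16_t port_id)
{
	int i, n = rte_eth_xstats_get_names(port_id, NULL, 0); /* pass 1: count */
	struct rte_eth_xstat_name *names;

	if (n <= 0)
		return;
	names = rte_malloc(NULL, sizeof(*names) * n, 0);
	if (names == NULL)
		return;
	if (rte_eth_xstats_get_names(port_id, names, n) == n)  /* pass 2: fill */
		for (i = 0; i < n; i++)
			printf("%s\n", names[i].name);
	rte_free(names);
}
#endif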
3051
3052 static int
3053 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3054                     unsigned n)
3055 {
3056         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3057         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3058         unsigned i, count, prio;
3059         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3060
3061         count = i40e_xstats_calc_num();
3062         if (n < count)
3063                 return count;
3064
3065         i40e_read_stats_registers(pf, hw);
3066
3067         if (xstats == NULL)
3068                 return 0;
3069
3070         count = 0;
3071
3072         /* Get stats from i40e_eth_stats struct */
3073         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3074                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3075                         rte_i40e_stats_strings[i].offset);
3076                 xstats[count].id = count;
3077                 count++;
3078         }
3079
3080         /* Get individual stats from i40e_hw_port struct */
3081         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3082                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3083                         rte_i40e_hw_port_strings[i].offset);
3084                 xstats[count].id = count;
3085                 count++;
3086         }
3087
3088         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3089                 for (prio = 0; prio < 8; prio++) {
3090                         xstats[count].value =
3091                                 *(uint64_t *)(((char *)hw_stats) +
3092                                 rte_i40e_rxq_prio_strings[i].offset +
3093                                 (sizeof(uint64_t) * prio));
3094                         xstats[count].id = count;
3095                         count++;
3096                 }
3097         }
3098
3099         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3100                 for (prio = 0; prio < 8; prio++) {
3101                         xstats[count].value =
3102                                 *(uint64_t *)(((char *)hw_stats) +
3103                                 rte_i40e_txq_prio_strings[i].offset +
3104                                 (sizeof(uint64_t) * prio));
3105                         xstats[count].id = count;
3106                         count++;
3107                 }
3108         }
3109
3110         return count;
3111 }
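
/*
 * Editor's note: the rte_i40e_*_strings tables used above (defined earlier
 * in this file) pair a display name with an offsetof() into the stats
 * struct, so each loop can read fields generically through a byte-offset
 * cast. The self-contained sketch below (all names hypothetical) shows the
 * same table-driven pattern in isolation.
 */
#if 0	/* illustrative sketch only */
struct demo_stats { uint64_t rx_bytes; uint64_t tx_bytes; };
struct demo_xstat_name_off { const char *name; unsigned int offset; };

static const struct demo_xstat_name_off demo_strings[] = {
	{"rx_good_bytes", offsetof(struct demo_stats, rx_bytes)},
	{"tx_good_bytes", offsetof(struct demo_stats, tx_bytes)},
};

static uint64_t
demo_read_stat(const struct demo_stats *s, unsigned int i)
{
	/* the same cast-and-offset access the xstats loops above perform */
	return *(const uint64_t *)(((const char *)s) + demo_strings[i].offset);
}
#endif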
3112
3113 static int
3114 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
3115                                  __rte_unused uint16_t queue_id,
3116                                  __rte_unused uint8_t stat_idx,
3117                                  __rte_unused uint8_t is_rx)
3118 {
3119         PMD_INIT_FUNC_TRACE();
3120
3121         return -ENOSYS;
3122 }
3123
3124 static int
3125 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3126 {
3127         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3128         u32 full_ver;
3129         u8 ver, patch;
3130         u16 build;
3131         int ret;
3132
3133         full_ver = hw->nvm.oem_ver;
3134         ver = (u8)(full_ver >> 24);
3135         build = (u16)((full_ver >> 8) & 0xffff);
3136         patch = (u8)(full_ver & 0xff);
3137
3138         ret = snprintf(fw_version, fw_size,
3139                  "%d.%d%d 0x%08x %d.%d.%d",
3140                  ((hw->nvm.version >> 12) & 0xf),
3141                  ((hw->nvm.version >> 4) & 0xff),
3142                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3143                  ver, build, patch);
3144
3145         ret += 1; /* add the size of the terminating '\0' */
3146         if (fw_size < (u32)ret)
3147                 return ret;
3148         else
3149                 return 0;
3150 }
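
/*
 * Editor's note: the positive return value above implements the ethdev
 * "buffer too small, return required size" convention; a hedged caller
 * sketch (port id and buffer size are assumptions):
 */
#if 0	/* illustrative sketch only */
static void
show_fw_version_sketch(uint16_t port_id)
{
	char ver[64];
	int ret = rte_eth_dev_fw_version_get(port_id, ver, sizeof(ver));

	if (ret == 0)
		printf("FW version: %s\n", ver);
	else if (ret > 0)
		printf("buffer too small, need %d bytes\n", ret);
}
#endif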
3151
3152 static void
3153 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3154 {
3155         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3156         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3157         struct i40e_vsi *vsi = pf->main_vsi;
3158         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3159
3160         dev_info->pci_dev = pci_dev;
3161         dev_info->max_rx_queues = vsi->nb_qps;
3162         dev_info->max_tx_queues = vsi->nb_qps;
3163         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3164         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3165         dev_info->max_mac_addrs = vsi->max_macaddrs;
3166         dev_info->max_vfs = pci_dev->max_vfs;
3167         dev_info->rx_offload_capa =
3168                 DEV_RX_OFFLOAD_VLAN_STRIP |
3169                 DEV_RX_OFFLOAD_QINQ_STRIP |
3170                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3171                 DEV_RX_OFFLOAD_UDP_CKSUM |
3172                 DEV_RX_OFFLOAD_TCP_CKSUM |
3173                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3174                 DEV_RX_OFFLOAD_CRC_STRIP;
3175         dev_info->tx_offload_capa =
3176                 DEV_TX_OFFLOAD_VLAN_INSERT |
3177                 DEV_TX_OFFLOAD_QINQ_INSERT |
3178                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3179                 DEV_TX_OFFLOAD_UDP_CKSUM |
3180                 DEV_TX_OFFLOAD_TCP_CKSUM |
3181                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3182                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3183                 DEV_TX_OFFLOAD_TCP_TSO |
3184                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3185                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3186                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3187                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
3188         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3189                                                 sizeof(uint32_t);
3190         dev_info->reta_size = pf->hash_lut_size;
3191         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3192
3193         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3194                 .rx_thresh = {
3195                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3196                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3197                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3198                 },
3199                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3200                 .rx_drop_en = 0,
3201         };
3202
3203         dev_info->default_txconf = (struct rte_eth_txconf) {
3204                 .tx_thresh = {
3205                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3206                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3207                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3208                 },
3209                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3210                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3211                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3212                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3213         };
3214
3215         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3216                 .nb_max = I40E_MAX_RING_DESC,
3217                 .nb_min = I40E_MIN_RING_DESC,
3218                 .nb_align = I40E_ALIGN_RING_DESC,
3219         };
3220
3221         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3222                 .nb_max = I40E_MAX_RING_DESC,
3223                 .nb_min = I40E_MIN_RING_DESC,
3224                 .nb_align = I40E_ALIGN_RING_DESC,
3225                 .nb_seg_max = I40E_TX_MAX_SEG,
3226                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3227         };
3228
3229         if (pf->flags & I40E_FLAG_VMDQ) {
3230                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3231                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3232                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3233                                                 pf->max_nb_vmdq_vsi;
3234                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3235                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3236                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3237         }
3238
3239         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
3240                 /* For XL710 */
3241                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3242         else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
3243                 /* For XXV710 */
3244                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3245         else
3246                 /* For X710 */
3247                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3248 }
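
/*
 * Editor's note: a hedged sketch of how an application consumes the
 * capability fields filled in above before enabling an Rx offload (port id
 * assumed; the DEV_RX_OFFLOAD_* flags are the ones this file already uses).
 */
#if 0	/* illustrative sketch only */
static int
rx_cksum_supported_sketch(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	return (info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) != 0;
}
#endif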
3249
3250 static int
3251 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3252 {
3253         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3254         struct i40e_vsi *vsi = pf->main_vsi;
3255         PMD_INIT_FUNC_TRACE();
3256
3257         if (on)
3258                 return i40e_vsi_add_vlan(vsi, vlan_id);
3259         else
3260                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3261 }
3262
3263 static int
3264 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3265                                 enum rte_vlan_type vlan_type,
3266                                 uint16_t tpid, int qinq)
3267 {
3268         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3269         uint64_t reg_r = 0;
3270         uint64_t reg_w = 0;
3271         uint16_t reg_id = 3;
3272         int ret;
3273
3274         if (qinq) {
3275                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3276                         reg_id = 2;
3277         }
3278
3279         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3280                                           &reg_r, NULL);
3281         if (ret != I40E_SUCCESS) {
3282                 PMD_DRV_LOG(ERR,
3283                            "Failed to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3284                            reg_id);
3285                 return -EIO;
3286         }
3287         PMD_DRV_LOG(DEBUG,
3288                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3289                     reg_id, reg_r);
3290
3291         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3292         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3293         if (reg_r == reg_w) {
3294                 PMD_DRV_LOG(DEBUG, "No need to write");
3295                 return 0;
3296         }
3297
3298         ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3299                                            reg_w, NULL);
3300         if (ret != I40E_SUCCESS) {
3301                 PMD_DRV_LOG(ERR,
3302                             "Failed to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3303                             reg_id);
3304                 return -EIO;
3305         }
3306         PMD_DRV_LOG(DEBUG,
3307                     "Global register 0x%08x is changed with value 0x%08x",
3308                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3309
3310         return 0;
3311 }
3312
3313 static int
3314 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3315                    enum rte_vlan_type vlan_type,
3316                    uint16_t tpid)
3317 {
3318         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3319         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3320         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
3321         int ret = 0;
3322
3323         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3324              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3325             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3326                 PMD_DRV_LOG(ERR,
3327                             "Unsupported vlan type.");
3328                 return -EINVAL;
3329         }
3330
3331         if (pf->support_multi_driver) {
3332                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3333                 return -ENOTSUP;
3334         }
3335
3336         /* 802.1ad frame support was added in NVM API 1.7 */
3337         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3338                 if (qinq) {
3339                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3340                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3341                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3342                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3343                 } else {
3344                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3345                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3346                 }
3347                 ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
3348                 if (ret != I40E_SUCCESS) {
3349                         PMD_DRV_LOG(ERR,
3350                                     "Set switch config failed, aq_err: %d",
3351                                     hw->aq.asq_last_status);
3352                         ret = -EIO;
3353                 }
3354         } else
3355                 /* If NVM API < 1.7, keep the register setting */
3356                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3357                                                       tpid, qinq);
3358         i40e_global_cfg_warning(I40E_WARNING_TPID);
3359
3360         return ret;
3361 }
3362
3363 static int
3364 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3365 {
3366         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3367         struct i40e_vsi *vsi = pf->main_vsi;
3368
3369         if (mask & ETH_VLAN_FILTER_MASK) {
3370                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3371                         i40e_vsi_config_vlan_filter(vsi, TRUE);
3372                 else
3373                         i40e_vsi_config_vlan_filter(vsi, FALSE);
3374         }
3375
3376         if (mask & ETH_VLAN_STRIP_MASK) {
3377                 /* Enable or disable VLAN stripping */
3378                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
3379                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
3380                 else
3381                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
3382         }
3383
3384         if (mask & ETH_VLAN_EXTEND_MASK) {
3385                 if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
3386                         i40e_vsi_config_double_vlan(vsi, TRUE);
3387                         /* Set global registers with default ethertype. */
3388                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3389                                            ETHER_TYPE_VLAN);
3390                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3391                                            ETHER_TYPE_VLAN);
3392                 }
3393                 else
3394                         i40e_vsi_config_double_vlan(vsi, FALSE);
3395         }
3396
3397         return 0;
3398 }
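
/*
 * Editor's note: the ETH_VLAN_*_MASK bits tested above are computed by the
 * ethdev layer from the ETH_VLAN_*_OFFLOAD state an application requests;
 * a hedged caller sketch (port id assumed):
 */
#if 0	/* illustrative sketch only */
static int
enable_vlan_strip_and_filter_sketch(uint16_t port_id)
{
	/* request strip + filter on; extend (QinQ) left off */
	return rte_eth_dev_set_vlan_offload(port_id,
			ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
}
#endif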
3399
3400 static void
3401 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3402                           __rte_unused uint16_t queue,
3403                           __rte_unused int on)
3404 {
3405         PMD_INIT_FUNC_TRACE();
3406 }
3407
3408 static int
3409 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3410 {
3411         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3412         struct i40e_vsi *vsi = pf->main_vsi;
3413         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3414         struct i40e_vsi_vlan_pvid_info info;
3415
3416         memset(&info, 0, sizeof(info));
3417         info.on = on;
3418         if (info.on)
3419                 info.config.pvid = pvid;
3420         else {
3421                 info.config.reject.tagged =
3422                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
3423                 info.config.reject.untagged =
3424                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
3425         }
3426
3427         return i40e_vsi_vlan_pvid_set(vsi, &info);
3428 }
3429
3430 static int
3431 i40e_dev_led_on(struct rte_eth_dev *dev)
3432 {
3433         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3434         uint32_t mode = i40e_led_get(hw);
3435
3436         if (mode == 0)
3437                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
3438
3439         return 0;
3440 }
3441
3442 static int
3443 i40e_dev_led_off(struct rte_eth_dev *dev)
3444 {
3445         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3446         uint32_t mode = i40e_led_get(hw);
3447
3448         if (mode != 0)
3449                 i40e_led_set(hw, 0, false);
3450
3451         return 0;
3452 }
3453
3454 static int
3455 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3456 {
3457         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3458         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3459
3460         fc_conf->pause_time = pf->fc_conf.pause_time;
3461
3462         /* read out from register, in case they are modified by other port */
3463         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
3464                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
3465         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
3466                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
3467
3468         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3469         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3470
3471         /* Return the current mode according to the actual setting */
3472         switch (hw->fc.current_mode) {
3473         case I40E_FC_FULL:
3474                 fc_conf->mode = RTE_FC_FULL;
3475                 break;
3476         case I40E_FC_TX_PAUSE:
3477                 fc_conf->mode = RTE_FC_TX_PAUSE;
3478                 break;
3479         case I40E_FC_RX_PAUSE:
3480                 fc_conf->mode = RTE_FC_RX_PAUSE;
3481                 break;
3482         case I40E_FC_NONE:
3483         default:
3484                 fc_conf->mode = RTE_FC_NONE;
3485         }
3486
3487         return 0;
3488 }
3489
3490 static int
3491 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3492 {
3493         uint32_t mflcn_reg, fctrl_reg, reg;
3494         uint32_t max_high_water;
3495         uint8_t i, aq_failure;
3496         int err;
3497         struct i40e_hw *hw;
3498         struct i40e_pf *pf;
3499         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3500                 [RTE_FC_NONE] = I40E_FC_NONE,
3501                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3502                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3503                 [RTE_FC_FULL] = I40E_FC_FULL
3504         };
3505
3506         /* The high_water field in rte_eth_fc_conf is in kilobyte units */
3507
3508         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3509         if ((fc_conf->high_water > max_high_water) ||
3510                         (fc_conf->high_water < fc_conf->low_water)) {
3511                 PMD_INIT_LOG(ERR,
3512                         "Invalid high/low water setup value in KB, High_water must be <= %d.",
3513                         max_high_water);
3514                 return -EINVAL;
3515         }
3516
3517         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3518         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3519         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3520
3521         pf->fc_conf.pause_time = fc_conf->pause_time;
3522         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3523         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3524
3525         PMD_INIT_FUNC_TRACE();
3526
3527         /* All the link flow control related enable/disable register
3528          * configuration is handled by the F/W
3529          */
3530         err = i40e_set_fc(hw, &aq_failure, true);
3531         if (err < 0)
3532                 return -ENOSYS;
3533
3534         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3535                 /* Configure flow control refresh threshold,
3536                  * the value for stat_tx_pause_refresh_timer[8]
3537                  * is used for global pause operation.
3538                  */
3539
3540                 I40E_WRITE_REG(hw,
3541                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3542                                pf->fc_conf.pause_time);
3543
3544                 /* configure the timer value included in transmitted pause
3545                  * frame,
3546                  * the value for stat_tx_pause_quanta[8] is used for global
3547                  * pause operation
3548                  */
3549                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3550                                pf->fc_conf.pause_time);
3551
3552                 fctrl_reg = I40E_READ_REG(hw,
3553                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3554
3555                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3556                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3557                 else
3558                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3559
3560                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3561                                fctrl_reg);
3562         } else {
3563                 /* Configure pause time (2 TCs per register) */
3564                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3565                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3566                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3567
3568                 /* Configure flow control refresh threshold value */
3569                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3570                                pf->fc_conf.pause_time / 2);
3571
3572                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3573
3574                 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
3575          * depending on the configuration
3576                  */
3577                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
3578                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3579                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3580                 } else {
3581                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3582                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3583                 }
3584
3585                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3586         }
3587
3588         if (!pf->support_multi_driver) {
3589                 /* Configure watermarks based on both packet count and bytes */
3590                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
3591                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3592                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3593                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
3594                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3595                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3596                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
3597                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3598                                   << I40E_KILOSHIFT);
3599                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
3600                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3601                                    << I40E_KILOSHIFT);
3602                 i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
3603         } else {
3604                 PMD_DRV_LOG(ERR,
3605                             "Watermark configuration is not supported.");
3606         }
3607
3608         I40E_WRITE_FLUSH(hw);
3609
3610         return 0;
3611 }
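
/*
 * Editor's note: a worked example (values assumed) of the watermark unit
 * conversions above. fc_conf carries kilobytes; GLRPB_GHW/GLW take bytes
 * (<< I40E_KILOSHIFT), and GLRPB_PHW/PLW take an approximate packet count
 * derived from the 128-byte I40E_PACKET_AVERAGE_SIZE.
 */
#if 0	/* illustrative sketch only */
static void
watermark_units_demo_sketch(void)
{
	uint32_t high_water_kb = 0xF2000 >> 10;		/* default high water: 968 KB */
	uint32_t ghw_bytes = high_water_kb << 10;	/* 991232 bytes for GLRPB_GHW */
	uint32_t phw_pkts = ghw_bytes / 128;		/* 7744 packets for GLRPB_PHW */

	(void)ghw_bytes;
	(void)phw_pkts;
}
#endif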
3612
3613 static int
3614 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3615                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3616 {
3617         PMD_INIT_FUNC_TRACE();
3618
3619         return -ENOSYS;
3620 }
3621
3622 /* Add a MAC address, and update filters */
3623 static int
3624 i40e_macaddr_add(struct rte_eth_dev *dev,
3625                  struct ether_addr *mac_addr,
3626                  __rte_unused uint32_t index,
3627                  uint32_t pool)
3628 {
3629         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3630         struct i40e_mac_filter_info mac_filter;
3631         struct i40e_vsi *vsi;
3632         int ret;
3633
3634         /* If VMDQ not enabled or configured, return */
3635         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
3636                           !pf->nb_cfg_vmdq_vsi)) {
3637                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
3638                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
3639                         pool);
3640                 return -ENOTSUP;
3641         }
3642
3643         if (pool > pf->nb_cfg_vmdq_vsi) {
3644                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3645                                 pool, pf->nb_cfg_vmdq_vsi);
3646                 return -EINVAL;
3647         }
3648
3649         rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3650         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3651                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3652         else
3653                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3654
3655         if (pool == 0)
3656                 vsi = pf->main_vsi;
3657         else
3658                 vsi = pf->vmdq[pool - 1].vsi;
3659
3660         ret = i40e_vsi_add_mac(vsi, &mac_filter);
3661         if (ret != I40E_SUCCESS) {
3662                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3663                 return -ENODEV;
3664         }
3665         return 0;
3666 }
3667
3668 /* Remove a MAC address, and update filters */
3669 static void
3670 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3671 {
3672         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3673         struct i40e_vsi *vsi;
3674         struct rte_eth_dev_data *data = dev->data;
3675         struct ether_addr *macaddr;
3676         int ret;
3677         uint32_t i;
3678         uint64_t pool_sel;
3679
3680         macaddr = &(data->mac_addrs[index]);
3681
3682         pool_sel = dev->data->mac_pool_sel[index];
3683
3684         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3685                 if (pool_sel & (1ULL << i)) {
3686                         if (i == 0)
3687                                 vsi = pf->main_vsi;
3688                         else {
3689                                 /* No VMDQ pool enabled or configured */
3690                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3691                                         (i > pf->nb_cfg_vmdq_vsi)) {
3692                                         PMD_DRV_LOG(ERR,
3693                                                 "No VMDQ pool enabled/configured");
3694                                         return;
3695                                 }
3696                                 vsi = pf->vmdq[i - 1].vsi;
3697                         }
3698                         ret = i40e_vsi_delete_mac(vsi, macaddr);
3699
3700                         if (ret) {
3701                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3702                                 return;
3703                         }
3704                 }
3705         }
3706 }
3707
3708 /* Set perfect match or hash match of MAC and VLAN for a VF */
3709 static int
3710 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3711                  struct rte_eth_mac_filter *filter,
3712                  bool add)
3713 {
3714         struct i40e_hw *hw;
3715         struct i40e_mac_filter_info mac_filter;
3716         struct ether_addr old_mac;
3717         struct ether_addr *new_mac;
3718         struct i40e_pf_vf *vf = NULL;
3719         uint16_t vf_id;
3720         int ret;
3721
3722         if (pf == NULL) {
3723                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3724                 return -EINVAL;
3725         }
3726         hw = I40E_PF_TO_HW(pf);
3727
3728         if (filter == NULL) {
3729                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3730                 return -EINVAL;
3731         }
3732
3733         new_mac = &filter->mac_addr;
3734
3735         if (is_zero_ether_addr(new_mac)) {
3736                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3737                 return -EINVAL;
3738         }
3739
3740         vf_id = filter->dst_id;
3741
3742         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
3743                 PMD_DRV_LOG(ERR, "Invalid argument.");
3744                 return -EINVAL;
3745         }
3746         vf = &pf->vfs[vf_id];
3747
3748         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3749                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3750                 return -EINVAL;
3751         }
3752
3753         if (add) {
3754                 rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3755                 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3756                                 ETHER_ADDR_LEN);
3757                 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3758                                  ETHER_ADDR_LEN);
3759
3760                 mac_filter.filter_type = filter->filter_type;
3761                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3762                 if (ret != I40E_SUCCESS) {
3763                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3764                         return -1;
3765                 }
3766                 ether_addr_copy(new_mac, &pf->dev_addr);
3767         } else {
3768                 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3769                                 ETHER_ADDR_LEN);
3770                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3771                 if (ret != I40E_SUCCESS) {
3772                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3773                         return -1;
3774                 }
3775
3776                 /* Clear device address as it has been removed */
3777                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3778                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3779         }
3780
3781         return 0;
3782 }
3783
3784 /* MAC filter handle */
3785 static int
3786 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3787                 void *arg)
3788 {
3789         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3790         struct rte_eth_mac_filter *filter;
3791         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3792         int ret = I40E_NOT_SUPPORTED;
3793
3794         filter = (struct rte_eth_mac_filter *)(arg);
3795
3796         switch (filter_op) {
3797         case RTE_ETH_FILTER_NOP:
3798                 ret = I40E_SUCCESS;
3799                 break;
3800         case RTE_ETH_FILTER_ADD:
3801                 i40e_pf_disable_irq0(hw);
3802                 if (filter->is_vf)
3803                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
3804                 i40e_pf_enable_irq0(hw);
3805                 break;
3806         case RTE_ETH_FILTER_DELETE:
3807                 i40e_pf_disable_irq0(hw);
3808                 if (filter->is_vf)
3809                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
3810                 i40e_pf_enable_irq0(hw);
3811                 break;
3812         default:
3813                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3814                 ret = I40E_ERR_PARAM;
3815                 break;
3816         }
3817
3818         return ret;
3819 }
3820
3821 static int
3822 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3823 {
3824         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3825         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3826         uint32_t reg;
3827         int ret;
3828
3829         if (!lut)
3830                 return -EINVAL;
3831
3832         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3833                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
3834                                           lut, lut_size);
3835                 if (ret) {
3836                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3837                         return ret;
3838                 }
3839         } else {
3840                 uint32_t *lut_dw = (uint32_t *)lut;
3841                 uint16_t i, lut_size_dw = lut_size / 4;
3842
3843                 if (vsi->type == I40E_VSI_SRIOV) {
3844                         for (i = 0; i < lut_size_dw; i++) {
3845                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
3846                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
3847                         }
3848                 } else {
3849                         for (i = 0; i < lut_size_dw; i++)
3850                                 lut_dw[i] = I40E_READ_REG(hw,
3851                                                           I40E_PFQF_HLUT(i));
3852                 }
3853         }
3854
3855         return 0;
3856 }
3857
3858 int
3859 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3860 {
3861         struct i40e_pf *pf;
3862         struct i40e_hw *hw;
3863         int ret;
3864
3865         if (!vsi || !lut)
3866                 return -EINVAL;
3867
3868         pf = I40E_VSI_TO_PF(vsi);
3869         hw = I40E_VSI_TO_HW(vsi);
3870
3871         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3872                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
3873                                           lut, lut_size);
3874                 if (ret) {
3875                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3876                         return ret;
3877                 }
3878         } else {
3879                 uint32_t *lut_dw = (uint32_t *)lut;
3880                 uint16_t i, lut_size_dw = lut_size / 4;
3881
3882                 if (vsi->type == I40E_VSI_SRIOV) {
3883                         for (i = 0; i < lut_size_dw; i++)
3884                                 I40E_WRITE_REG(
3885                                         hw,
3886                                         I40E_VFQF_HLUT1(i, vsi->user_param),
3887                                         lut_dw[i]);
3888                 } else {
3889                         for (i = 0; i < lut_size_dw; i++)
3890                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
3891                                                lut_dw[i]);
3892                 }
3893                 I40E_WRITE_FLUSH(hw);
3894         }
3895
3896         return 0;
3897 }
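
/*
 * Illustrative note (not in the original source): in the register-based
 * path the byte-wise LUT is moved four entries at a time, since each HLUT
 * register holds four 8-bit queue indexes. A 512-byte PF LUT thus maps to
 * 128 I40E_PFQF_HLUT registers, and on a little-endian host lut[0] lands
 * in the least significant byte of lut_dw[0].
 */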
3898
3899 static int
3900 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
3901                          struct rte_eth_rss_reta_entry64 *reta_conf,
3902                          uint16_t reta_size)
3903 {
3904         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3905         uint16_t i, lut_size = pf->hash_lut_size;
3906         uint16_t idx, shift;
3907         uint8_t *lut;
3908         int ret;
3909
3910         if (reta_size != lut_size ||
3911                 reta_size > ETH_RSS_RETA_SIZE_512) {
3912                 PMD_DRV_LOG(ERR,
3913                         "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
3914                         reta_size, lut_size);
3915                 return -EINVAL;
3916         }
3917
3918         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3919         if (!lut) {
3920                 PMD_DRV_LOG(ERR, "Failed to allocate memory for the RSS lookup table");
3921                 return -ENOMEM;
3922         }
3923         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3924         if (ret)
3925                 goto out;
3926         for (i = 0; i < reta_size; i++) {
3927                 idx = i / RTE_RETA_GROUP_SIZE;
3928                 shift = i % RTE_RETA_GROUP_SIZE;
3929                 if (reta_conf[idx].mask & (1ULL << shift))
3930                         lut[i] = reta_conf[idx].reta[shift];
3931         }
3932         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
3933
3934 out:
3935         rte_free(lut);
3936
3937         return ret;
3938 }
3939
3940 static int
3941 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
3942                         struct rte_eth_rss_reta_entry64 *reta_conf,
3943                         uint16_t reta_size)
3944 {
3945         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3946         uint16_t i, lut_size = pf->hash_lut_size;
3947         uint16_t idx, shift;
3948         uint8_t *lut;
3949         int ret;
3950
3951         if (reta_size != lut_size ||
3952                 reta_size > ETH_RSS_RETA_SIZE_512) {
3953                 PMD_DRV_LOG(ERR,
3954                         "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
3955                         reta_size, lut_size);
3956                 return -EINVAL;
3957         }
3958
3959         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3960         if (!lut) {
3961                 PMD_DRV_LOG(ERR, "Failed to allocate memory for the RSS lookup table");
3962                 return -ENOMEM;
3963         }
3964
3965         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3966         if (ret)
3967                 goto out;
3968         for (i = 0; i < reta_size; i++) {
3969                 idx = i / RTE_RETA_GROUP_SIZE;
3970                 shift = i % RTE_RETA_GROUP_SIZE;
3971                 if (reta_conf[idx].mask & (1ULL << shift))
3972                         reta_conf[idx].reta[shift] = lut[i];
3973         }
3974
3975 out:
3976         rte_free(lut);
3977
3978         return ret;
3979 }
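
/*
 * Worked example (illustrative only): with RTE_RETA_GROUP_SIZE == 64,
 * RETA entry 130 lives in reta_conf[130 / 64] == reta_conf[2] at shift
 * 130 % 64 == 2, so it is read or written only when bit 2 of
 * reta_conf[2].mask is set.
 */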
3980
3981 /**
3982  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
3983  * @hw:   pointer to the HW structure
3984  * @mem:  pointer to mem struct to fill out
3985  * @size: size of memory requested
3986  * @alignment: what to align the allocation to
3987  **/
3988 enum i40e_status_code
3989 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3990                         struct i40e_dma_mem *mem,
3991                         u64 size,
3992                         u32 alignment)
3993 {
3994         const struct rte_memzone *mz = NULL;
3995         char z_name[RTE_MEMZONE_NAMESIZE];
3996
3997         if (!mem)
3998                 return I40E_ERR_PARAM;
3999
4000         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4001         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
4002                                          alignment, RTE_PGSIZE_2M);
4003         if (!mz)
4004                 return I40E_ERR_NO_MEMORY;
4005
4006         mem->size = size;
4007         mem->va = mz->addr;
4008         mem->pa = mz->iova;
4009         mem->zone = (const void *)mz;
4010         PMD_DRV_LOG(DEBUG,
4011                 "memzone %s allocated with physical address: %"PRIu64,
4012                 mz->name, mem->pa);
4013
4014         return I40E_SUCCESS;
4015 }
4016
4017 /**
4018  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4019  * @hw:   pointer to the HW structure
4020  * @mem:  ptr to mem struct to free
4021  **/
4022 enum i40e_status_code
4023 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4024                     struct i40e_dma_mem *mem)
4025 {
4026         if (!mem)
4027                 return I40E_ERR_PARAM;
4028
4029         PMD_DRV_LOG(DEBUG,
4030                 "memzone %s to be freed with physical address: %"PRIu64,
4031                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4032         rte_memzone_free((const struct rte_memzone *)mem->zone);
4033         mem->zone = NULL;
4034         mem->va = NULL;
4035         mem->pa = (u64)0;
4036
4037         return I40E_SUCCESS;
4038 }
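
/*
 * Minimal usage sketch (illustrative; assumes a valid hw handle). The base
 * driver normally reaches these helpers through its OS-dep wrappers, but
 * the calling convention boils down to:
 *
 *      struct i40e_dma_mem mem;
 *
 *      if (i40e_allocate_dma_mem_d(hw, &mem, 4096, 4096) == I40E_SUCCESS) {
 *              ... use mem.va (CPU mapping) and mem.pa (device IOVA) ...
 *              i40e_free_dma_mem_d(hw, &mem);
 *      }
 */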
4039
4040 /**
4041  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4042  * @hw:   pointer to the HW structure
4043  * @mem:  pointer to mem struct to fill out
4044  * @size: size of memory requested
4045  **/
4046 enum i40e_status_code
4047 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4048                          struct i40e_virt_mem *mem,
4049                          u32 size)
4050 {
4051         if (!mem)
4052                 return I40E_ERR_PARAM;
4053
4054         mem->size = size;
4055         mem->va = rte_zmalloc("i40e", size, 0);
4056
4057         if (mem->va)
4058                 return I40E_SUCCESS;
4059         else
4060                 return I40E_ERR_NO_MEMORY;
4061 }
4062
4063 /**
4064  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4065  * @hw:   pointer to the HW structure
4066  * @mem:  pointer to mem struct to free
4067  **/
4068 enum i40e_status_code
4069 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4070                      struct i40e_virt_mem *mem)
4071 {
4072         if (!mem)
4073                 return I40E_ERR_PARAM;
4074
4075         rte_free(mem->va);
4076         mem->va = NULL;
4077
4078         return I40E_SUCCESS;
4079 }
4080
4081 void
4082 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4083 {
4084         rte_spinlock_init(&sp->spinlock);
4085 }
4086
4087 void
4088 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4089 {
4090         rte_spinlock_lock(&sp->spinlock);
4091 }
4092
4093 void
4094 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4095 {
4096         rte_spinlock_unlock(&sp->spinlock);
4097 }
4098
4099 void
4100 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
4101 {
4102         return;
4103 }
4104
4105 /**
4106  * Get the hardware capabilities, which will be parsed
4107  * and saved into struct i40e_hw.
4108  */
4109 static int
4110 i40e_get_cap(struct i40e_hw *hw)
4111 {
4112         struct i40e_aqc_list_capabilities_element_resp *buf;
4113         uint16_t len, size = 0;
4114         int ret;
4115
4116         /* Calculate a buffer large enough to hold the response data temporarily */
4117         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4118                                                 I40E_MAX_CAP_ELE_NUM;
4119         buf = rte_zmalloc("i40e", len, 0);
4120         if (!buf) {
4121                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4122                 return I40E_ERR_NO_MEMORY;
4123         }
4124
4125         /* Get and parse the capabilities, then save them to hw */
4126         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4127                         i40e_aqc_opc_list_func_capabilities, NULL);
4128         if (ret != I40E_SUCCESS)
4129                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4130
4131         /* Free the temporary buffer after being used */
4132         rte_free(buf);
4133
4134         return ret;
4135 }
4136
4137 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4138 #define QUEUE_NUM_PER_VF_ARG                    "queue-num-per-vf"
4139
4140 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4141                 const char *value,
4142                 void *opaque)
4143 {
4144         struct i40e_pf *pf;
4145         unsigned long num;
4146         char *end;
4147
4148         pf = (struct i40e_pf *)opaque;
4149         RTE_SET_USED(key);
4150
4151         errno = 0;
4152         num = strtoul(value, &end, 0);
4153         if (errno != 0 || end == value || *end != 0) {
4154                 PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, keeping "
4155                             "the current value = %hu", value, pf->vf_nb_qp_max);
4156                 return -(EINVAL);
4157         }
4158
4159         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4160                 pf->vf_nb_qp_max = (uint16_t)num;
4161         else
4162                 /* return 0 so a following valid same argument can still work */
4163                 PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu, it must be "
4164                             "a power of 2 and no greater than 16! Keeping the "
4165                             "current value = %hu", num, pf->vf_nb_qp_max);
4166
4167         return 0;
4168 }
4169
4170 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4171 {
4172         static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
4173         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4174         struct rte_kvargs *kvlist;
4175
4176         /* set default queue number per VF as 4 */
4177         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4178
4179         if (dev->device->devargs == NULL)
4180                 return 0;
4181
4182         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4183         if (kvlist == NULL)
4184                 return -(EINVAL);
4185
4186         if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
4187                 PMD_DRV_LOG(WARNING, "Multiple \"%s\" arguments given; only "
4188                             "the first invalid or the last valid one takes effect!",
4189                             QUEUE_NUM_PER_VF_ARG);
4190
4191         rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
4192                            i40e_pf_parse_vf_queue_number_handler, pf);
4193
4194         rte_kvargs_free(kvlist);
4195
4196         return 0;
4197 }
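
/*
 * Usage sketch (illustrative; the PCI address is just an example): the
 * per-VF queue number is passed as a device argument on the EAL command
 * line, e.g.
 *
 *      -w 0000:84:00.0,queue-num-per-vf=8
 *
 * Values that are not a power of 2, or that exceed I40E_MAX_QP_NUM_PER_VF,
 * are rejected by the handler above and the default of 4 is kept.
 */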
4198
4199 static int
4200 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4201 {
4202         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4203         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4204         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4205         uint16_t qp_count = 0, vsi_count = 0;
4206
4207         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4208                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4209                 return -EINVAL;
4210         }
4211
4212         i40e_pf_config_vf_rxq_number(dev);
4213
4214         /* Add the parameter init for LFC */
4215         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4216         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4217         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4218
4219         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4220         pf->max_num_vsi = hw->func_caps.num_vsis;
4221         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4222         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4223
4224         /* FDir queue/VSI allocation */
4225         pf->fdir_qp_offset = 0;
4226         if (hw->func_caps.fd) {
4227                 pf->flags |= I40E_FLAG_FDIR;
4228                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4229         } else {
4230                 pf->fdir_nb_qps = 0;
4231         }
4232         qp_count += pf->fdir_nb_qps;
4233         vsi_count += 1;
4234
4235         /* LAN queue/VSI allocation */
4236         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4237         if (!hw->func_caps.rss) {
4238                 pf->lan_nb_qps = 1;
4239         } else {
4240                 pf->flags |= I40E_FLAG_RSS;
4241                 if (hw->mac.type == I40E_MAC_X722)
4242                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4243                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4244         }
4245         qp_count += pf->lan_nb_qps;
4246         vsi_count += 1;
4247
4248         /* VF queue/VSI allocation */
4249         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4250         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4251                 pf->flags |= I40E_FLAG_SRIOV;
4252                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4253                 pf->vf_num = pci_dev->max_vfs;
4254                 PMD_DRV_LOG(DEBUG,
4255                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4256                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4257         } else {
4258                 pf->vf_nb_qps = 0;
4259                 pf->vf_num = 0;
4260         }
4261         qp_count += pf->vf_nb_qps * pf->vf_num;
4262         vsi_count += pf->vf_num;
4263
4264         /* VMDq queue/VSI allocation */
4265         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4266         pf->vmdq_nb_qps = 0;
4267         pf->max_nb_vmdq_vsi = 0;
4268         if (hw->func_caps.vmdq) {
4269                 if (qp_count < hw->func_caps.num_tx_qp &&
4270                         vsi_count < hw->func_caps.num_vsis) {
4271                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4272                                 qp_count) / pf->vmdq_nb_qp_max;
4273
4274                         /* Limit the maximum number of VMDq vsi to the maximum
4275                          * ethdev can support
4276                          */
4277                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4278                                 hw->func_caps.num_vsis - vsi_count);
4279                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4280                                 ETH_64_POOLS);
4281                         if (pf->max_nb_vmdq_vsi) {
4282                                 pf->flags |= I40E_FLAG_VMDQ;
4283                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4284                                 PMD_DRV_LOG(DEBUG,
4285                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4286                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4287                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4288                         } else {
4289                                 PMD_DRV_LOG(INFO,
4290                                         "Not enough queues left for VMDq");
4291                         }
4292                 } else {
4293                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4294                 }
4295         }
4296         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4297         vsi_count += pf->max_nb_vmdq_vsi;
4298
4299         if (hw->func_caps.dcb)
4300                 pf->flags |= I40E_FLAG_DCB;
4301
4302         if (qp_count > hw->func_caps.num_tx_qp) {
4303                 PMD_DRV_LOG(ERR,
4304                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4305                         qp_count, hw->func_caps.num_tx_qp);
4306                 return -EINVAL;
4307         }
4308         if (vsi_count > hw->func_caps.num_vsis) {
4309                 PMD_DRV_LOG(ERR,
4310                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4311                         vsi_count, hw->func_caps.num_vsis);
4312                 return -EINVAL;
4313         }
4314
4315         return 0;
4316 }
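
/*
 * Worked example (illustrative; the exact numbers depend on the build-time
 * configuration): with FDIR and RSS supported, 64 LAN queues and 4 VFs at
 * the default 4 queues each, the budget computed above is
 * qp_count = 1 (FDIR) + 64 (LAN) + 16 (VFs) = 81 queues and
 * vsi_count = 1 (FDIR) + 1 (LAN) + 4 (VFs) = 6 VSIs; any TX queues and
 * VSIs still left under the function capabilities are handed to VMDq.
 */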
4317
4318 static int
4319 i40e_pf_get_switch_config(struct i40e_pf *pf)
4320 {
4321         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4322         struct i40e_aqc_get_switch_config_resp *switch_config;
4323         struct i40e_aqc_switch_config_element_resp *element;
4324         uint16_t start_seid = 0, num_reported;
4325         int ret;
4326
4327         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4328                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4329         if (!switch_config) {
4330                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4331                 return -ENOMEM;
4332         }
4333
4334         /* Get the switch configurations */
4335         ret = i40e_aq_get_switch_config(hw, switch_config,
4336                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4337         if (ret != I40E_SUCCESS) {
4338                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4339                 goto fail;
4340         }
4341         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4342         if (num_reported != 1) { /* The number should be 1 */
4343                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4344                 ret = -EINVAL;
                goto fail;
4345         }
4346
4347         /* Parse the switch configuration elements */
4348         element = &(switch_config->element[0]);
4349         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4350                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4351                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4352         } else
4353                 PMD_DRV_LOG(INFO, "Unknown element type");
4354
4355 fail:
4356         rte_free(switch_config);
4357
4358         return ret;
4359 }
4360
4361 static int
4362 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4363                         uint32_t num)
4364 {
4365         struct pool_entry *entry;
4366
4367         if (pool == NULL || num == 0)
4368                 return -EINVAL;
4369
4370         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4371         if (entry == NULL) {
4372                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4373                 return -ENOMEM;
4374         }
4375
4376         /* queue heap initialize */
4377         pool->num_free = num;
4378         pool->num_alloc = 0;
4379         pool->base = base;
4380         LIST_INIT(&pool->alloc_list);
4381         LIST_INIT(&pool->free_list);
4382
4383         /* Initialize the element */
4384         entry->base = 0;
4385         entry->len = num;
4386
4387         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4388         return 0;
4389 }
4390
4391 static void
4392 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4393 {
4394         struct pool_entry *entry, *next_entry;
4395
4396         if (pool == NULL)
4397                 return;
4398
4399         for (entry = LIST_FIRST(&pool->alloc_list);
4400                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4401                         entry = next_entry) {
4402                 LIST_REMOVE(entry, next);
4403                 rte_free(entry);
4404         }
4405
4406         for (entry = LIST_FIRST(&pool->free_list);
4407                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4408                         entry = next_entry) {
4409                 LIST_REMOVE(entry, next);
4410                 rte_free(entry);
4411         }
4412
4413         pool->num_free = 0;
4414         pool->num_alloc = 0;
4415         pool->base = 0;
4416         LIST_INIT(&pool->alloc_list);
4417         LIST_INIT(&pool->free_list);
4418 }
4419
4420 static int
4421 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4422                        uint32_t base)
4423 {
4424         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4425         uint32_t pool_offset;
4426         int insert;
4427
4428         if (pool == NULL) {
4429                 PMD_DRV_LOG(ERR, "Invalid parameter");
4430                 return -EINVAL;
4431         }
4432
4433         pool_offset = base - pool->base;
4434         /* Lookup in alloc list */
4435         LIST_FOREACH(entry, &pool->alloc_list, next) {
4436                 if (entry->base == pool_offset) {
4437                         valid_entry = entry;
4438                         LIST_REMOVE(entry, next);
4439                         break;
4440                 }
4441         }
4442
4443         /* Not found, return */
4444         if (valid_entry == NULL) {
4445                 PMD_DRV_LOG(ERR, "Failed to find entry");
4446                 return -EINVAL;
4447         }
4448
4449         /**
4450          * Found it; move it to the free list and try to merge.
4451          * To make merging easier, the free list is always sorted by queue base.
4452          * Find the adjacent prev and next entries.
4453          */
4454         prev = next = NULL;
4455         LIST_FOREACH(entry, &pool->free_list, next) {
4456                 if (entry->base > valid_entry->base) {
4457                         next = entry;
4458                         break;
4459                 }
4460                 prev = entry;
4461         }
4462
4463         insert = 0;
4464         /* Try to merge with the next one */
4465         if (next != NULL) {
4466                 /* Merge with next one */
4467                 if (valid_entry->base + valid_entry->len == next->base) {
4468                         next->base = valid_entry->base;
4469                         next->len += valid_entry->len;
4470                         rte_free(valid_entry);
4471                         valid_entry = next;
4472                         insert = 1;
4473                 }
4474         }
4475
4476         if (prev != NULL) {
4477                 /* Merge with previous one */
4478                 if (prev->base + prev->len == valid_entry->base) {
4479                         prev->len += valid_entry->len;
4480                         /* If it merged with the next one, remove that node */
4481                         if (insert == 1) {
4482                                 LIST_REMOVE(valid_entry, next);
4483                                 rte_free(valid_entry);
4484                         } else {
4485                                 rte_free(valid_entry);
4486                                 insert = 1;
4487                         }
4488                 }
4489         }
4490
4491         /* No entry found to merge with, insert */
4492         if (insert == 0) {
4493                 if (prev != NULL)
4494                         LIST_INSERT_AFTER(prev, valid_entry, next);
4495                 else if (next != NULL)
4496                         LIST_INSERT_BEFORE(next, valid_entry, next);
4497                 else /* It's empty list, insert to head */
4498                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4499         }
4500
4501         pool->num_free += valid_entry->len;
4502         pool->num_alloc -= valid_entry->len;
4503
4504         return 0;
4505 }
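
/*
 * Merge example (illustrative): if the free list holds {base 0, len 8} and
 * {base 12, len 4} and the block {base 8, len 4} is freed, it first merges
 * with the next entry into {base 8, len 8} and then with the previous one,
 * leaving a single free entry {base 0, len 16}.
 */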
4506
4507 static int
4508 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4509                        uint16_t num)
4510 {
4511         struct pool_entry *entry, *valid_entry;
4512
4513         if (pool == NULL || num == 0) {
4514                 PMD_DRV_LOG(ERR, "Invalid parameter");
4515                 return -EINVAL;
4516         }
4517
4518         if (pool->num_free < num) {
4519                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
4520                             num, pool->num_free);
4521                 return -ENOMEM;
4522         }
4523
4524         valid_entry = NULL;
4525         /* Look up in the free list and find the best-fit entry */
4526         LIST_FOREACH(entry, &pool->free_list, next) {
4527                 if (entry->len >= num) {
4528                         /* Find best one */
4529                         if (entry->len == num) {
4530                                 valid_entry = entry;
4531                                 break;
4532                         }
4533                         if (valid_entry == NULL || valid_entry->len > entry->len)
4534                                 valid_entry = entry;
4535                 }
4536         }
4537
4538         /* No entry found to satisfy the request, return */
4539         if (valid_entry == NULL) {
4540                 PMD_DRV_LOG(ERR, "No valid entry found");
4541                 return -ENOMEM;
4542         }
4543         /**
4544          * The entry has exactly the requested number of queues;
4545          * remove it from the free list.
4546          */
4547         if (valid_entry->len == num) {
4548                 LIST_REMOVE(valid_entry, next);
4549         } else {
4550                 /**
4551                  * The entry has more queues than requested;
4552                  * create a new entry for the alloc list and shrink the
4553                  * remaining entry's base and length in the free list.
4554                  */
4555                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4556                 if (entry == NULL) {
4557                         PMD_DRV_LOG(ERR,
4558                                 "Failed to allocate memory for resource pool");
4559                         return -ENOMEM;
4560                 }
4561                 entry->base = valid_entry->base;
4562                 entry->len = num;
4563                 valid_entry->base += num;
4564                 valid_entry->len -= num;
4565                 valid_entry = entry;
4566         }
4567
4568         /* Insert it into alloc list, not sorted */
4569         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4570
4571         pool->num_free -= valid_entry->len;
4572         pool->num_alloc += valid_entry->len;
4573
4574         return valid_entry->base + pool->base;
4575 }
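
/*
 * Best-fit example (illustrative): with free entries of lengths 8, 4 and
 * 16, a request for 4 takes the exact-fit entry whole, while a request
 * for 6 picks the length-8 entry and splits it, leaving {len 2} on the
 * free list and putting a new {len 6} entry on the alloc list.
 */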
4576
4577 /**
4578  * bitmap_is_subset - Check whether src2 is a subset of src1
4579  **/
4580 static inline int
4581 bitmap_is_subset(uint8_t src1, uint8_t src2)
4582 {
4583         return !((src1 ^ src2) & src2);
4584 }
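
/*
 * Example (illustrative): bitmap_is_subset(0x7, 0x5) returns non-zero
 * because every bit of 0x5 is also set in 0x7, while
 * bitmap_is_subset(0x5, 0x3) returns zero because bit 1 of 0x3 is not
 * set in 0x5.
 */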
4585
4586 static enum i40e_status_code
4587 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4588 {
4589         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4590
4591         /* If DCB is not supported, only default TC is supported */
4592         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4593                 PMD_DRV_LOG(ERR, "DCB is not supported, only TC0 can be used");
4594                 return I40E_NOT_SUPPORTED;
4595         }
4596
4597         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4598                 PMD_DRV_LOG(ERR,
4599                         "Enabled TC map 0x%x is not a subset of the HW-supported map 0x%x",
4600                         enabled_tcmap, hw->func_caps.enabled_tcmap);
4601                 return I40E_NOT_SUPPORTED;
4602         }
4603         return I40E_SUCCESS;
4604 }
4605
4606 int
4607 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4608                                 struct i40e_vsi_vlan_pvid_info *info)
4609 {
4610         struct i40e_hw *hw;
4611         struct i40e_vsi_context ctxt;
4612         uint8_t vlan_flags = 0;
4613         int ret;
4614
4615         if (vsi == NULL || info == NULL) {
4616                 PMD_DRV_LOG(ERR, "invalid parameters");
4617                 return I40E_ERR_PARAM;
4618         }
4619
4620         if (info->on) {
4621                 vsi->info.pvid = info->config.pvid;
4622                 /**
4623                  * If PVID insertion is enabled, only tagged packets
4624                  * are allowed to be sent out.
4625                  */
4626                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
4627                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4628         } else {
4629                 vsi->info.pvid = 0;
4630                 if (info->config.reject.tagged == 0)
4631                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4632
4633                 if (info->config.reject.untagged == 0)
4634                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
4635         }
4636         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
4637                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
4638         vsi->info.port_vlan_flags |= vlan_flags;
4639         vsi->info.valid_sections =
4640                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4641         memset(&ctxt, 0, sizeof(ctxt));
4642         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4643         ctxt.seid = vsi->seid;
4644
4645         hw = I40E_VSI_TO_HW(vsi);
4646         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4647         if (ret != I40E_SUCCESS)
4648                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
4649
4650         return ret;
4651 }
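
/*
 * Usage sketch (illustrative only):
 *
 *      struct i40e_vsi_vlan_pvid_info info;
 *
 *      memset(&info, 0, sizeof(info));
 *      info.on = 1;
 *      info.config.pvid = 100;
 *      i40e_vsi_vlan_pvid_set(vsi, &info);
 *
 * enables PVID insertion with VLAN ID 100, so only tagged packets may be
 * sent out; with info.on == 0 the reject flags instead decide whether
 * tagged and/or untagged packets are allowed.
 */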
4652
4653 static int
4654 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4655 {
4656         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4657         int i, ret;
4658         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
4659
4660         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4661         if (ret != I40E_SUCCESS)
4662                 return ret;
4663
4664         if (!vsi->seid) {
4665                 PMD_DRV_LOG(ERR, "seid not valid");
4666                 return -EINVAL;
4667         }
4668
4669         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
4670         tc_bw_data.tc_valid_bits = enabled_tcmap;
4671         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4672                 tc_bw_data.tc_bw_credits[i] =
4673                         (enabled_tcmap & (1 << i)) ? 1 : 0;
4674
4675         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
4676         if (ret != I40E_SUCCESS) {
4677                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
4678                 return ret;
4679         }
4680
4681         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
4682                                         sizeof(vsi->info.qs_handle));
4683         return I40E_SUCCESS;
4684 }
4685
4686 static enum i40e_status_code
4687 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
4688                                  struct i40e_aqc_vsi_properties_data *info,
4689                                  uint8_t enabled_tcmap)
4690 {
4691         enum i40e_status_code ret;
4692         int i, total_tc = 0;
4693         uint16_t qpnum_per_tc, bsf, qp_idx;
4694
4695         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4696         if (ret != I40E_SUCCESS)
4697                 return ret;
4698
4699         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4700                 if (enabled_tcmap & (1 << i))
4701                         total_tc++;
4702         if (total_tc == 0)
4703                 total_tc = 1;
4704         vsi->enabled_tc = enabled_tcmap;
4705
4706         /* Number of queues per enabled TC */
4707         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
4708         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
4709         bsf = rte_bsf32(qpnum_per_tc);
4710
4711         /* Adjust the queue number to actual queues that can be applied */
4712         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4713                 vsi->nb_qps = qpnum_per_tc * total_tc;
4714
4715         /**
4716          * Configure TC and queue mapping parameters. For each enabled TC,
4717          * allocate qpnum_per_tc queues to that traffic class; disabled
4718          * TCs are served by the default queue.
4719          */
4720         qp_idx = 0;
4721         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4722                 if (vsi->enabled_tc & (1 << i)) {
4723                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4724                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4725                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4726                         qp_idx += qpnum_per_tc;
4727                 } else
4728                         info->tc_mapping[i] = 0;
4729         }
4730
4731         /* Associate queue number with VSI */
4732         if (vsi->type == I40E_VSI_SRIOV) {
4733                 info->mapping_flags |=
4734                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4735                 for (i = 0; i < vsi->nb_qps; i++)
4736                         info->queue_mapping[i] =
4737                                 rte_cpu_to_le_16(vsi->base_queue + i);
4738         } else {
4739                 info->mapping_flags |=
4740                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4741                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4742         }
4743         info->valid_sections |=
4744                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4745
4746         return I40E_SUCCESS;
4747 }
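
/*
 * Worked example (illustrative): with vsi->nb_qps == 16 and TC0/TC1
 * enabled, total_tc == 2, qpnum_per_tc == 8 and bsf == rte_bsf32(8) == 3.
 * TC0 is then mapped with queue offset 0 and TC1 with queue offset 8,
 * each encoding log2(8) == 3 in the queue-number field of tc_mapping[].
 */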
4748
4749 static int
4750 i40e_veb_release(struct i40e_veb *veb)
4751 {
4752         struct i40e_vsi *vsi;
4753         struct i40e_hw *hw;
4754
4755         if (veb == NULL)
4756                 return -EINVAL;
4757
4758         if (!TAILQ_EMPTY(&veb->head)) {
4759                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4760                 return -EACCES;
4761         }
4762         /* associate_vsi field is NULL for floating VEB */
4763         if (veb->associate_vsi != NULL) {
4764                 vsi = veb->associate_vsi;
4765                 hw = I40E_VSI_TO_HW(vsi);
4766
4767                 vsi->uplink_seid = veb->uplink_seid;
4768                 vsi->veb = NULL;
4769         } else {
4770                 veb->associate_pf->main_vsi->floating_veb = NULL;
4771                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4772         }
4773
4774         i40e_aq_delete_element(hw, veb->seid, NULL);
4775         rte_free(veb);
4776         return I40E_SUCCESS;
4777 }
4778
4779 /* Setup a veb */
4780 static struct i40e_veb *
4781 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4782 {
4783         struct i40e_veb *veb;
4784         int ret;
4785         struct i40e_hw *hw;
4786
4787         if (pf == NULL) {
4788                 PMD_DRV_LOG(ERR,
4789                             "VEB setup failed, the associated PF shouldn't be NULL");
4790                 return NULL;
4791         }
4792         hw = I40E_PF_TO_HW(pf);
4793
4794         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4795         if (!veb) {
4796                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4797                 goto fail;
4798         }
4799
4800         veb->associate_vsi = vsi;
4801         veb->associate_pf = pf;
4802         TAILQ_INIT(&veb->head);
4803         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4804
4805         /* create floating veb if vsi is NULL */
4806         if (vsi != NULL) {
4807                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4808                                       I40E_DEFAULT_TCMAP, false,
4809                                       &veb->seid, false, NULL);
4810         } else {
4811                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4812                                       true, &veb->seid, false, NULL);
4813         }
4814
4815         if (ret != I40E_SUCCESS) {
4816                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4817                             hw->aq.asq_last_status);
4818                 goto fail;
4819         }
4820         veb->enabled_tc = I40E_DEFAULT_TCMAP;
4821
4822         /* get statistics index */
4823         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4824                                 &veb->stats_idx, NULL, NULL, NULL);
4825         if (ret != I40E_SUCCESS) {
4826                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
4827                             hw->aq.asq_last_status);
4828                 goto fail;
4829         }
4830         /* Get VEB bandwidth, to be implemented */
4831         /* Now associated vsi binding to the VEB, set uplink to this VEB */
4832         if (vsi)
4833                 vsi->uplink_seid = veb->seid;
4834
4835         return veb;
4836 fail:
4837         rte_free(veb);
4838         return NULL;
4839 }
4840
4841 int
4842 i40e_vsi_release(struct i40e_vsi *vsi)
4843 {
4844         struct i40e_pf *pf;
4845         struct i40e_hw *hw;
4846         struct i40e_vsi_list *vsi_list;
4847         void *temp;
4848         int ret;
4849         struct i40e_mac_filter *f;
4850         uint16_t user_param;
4851
4852         if (!vsi)
4853                 return I40E_SUCCESS;
4854
4855         if (!vsi->adapter)
4856                 return -EFAULT;
4857
4858         user_param = vsi->user_param;
4859
4860         pf = I40E_VSI_TO_PF(vsi);
4861         hw = I40E_VSI_TO_HW(vsi);
4862
4863         /* VSI has children attached; release the children first */
4864         if (vsi->veb) {
4865                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
4866                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4867                                 return -1;
4868                 }
4869                 i40e_veb_release(vsi->veb);
4870         }
4871
4872         if (vsi->floating_veb) {
4873                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
4874                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4875                                 return -1;
4876                 }
4877         }
4878
4879         /* Remove all macvlan filters of the VSI */
4880         i40e_vsi_remove_all_macvlan_filter(vsi);
4881         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
4882                 rte_free(f);
4883
4884         if (vsi->type != I40E_VSI_MAIN &&
4885             ((vsi->type != I40E_VSI_SRIOV) ||
4886             !pf->floating_veb_list[user_param])) {
4887                 /* Remove vsi from parent's sibling list */
4888                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
4889                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its VEB is NULL");
4890                         return I40E_ERR_PARAM;
4891                 }
4892                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
4893                                 &vsi->sib_vsi_list, list);
4894
4895                 /* Remove all switch element of the VSI */
4896                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4897                 if (ret != I40E_SUCCESS)
4898                         PMD_DRV_LOG(ERR, "Failed to delete element");
4899         }
4900
4901         if ((vsi->type == I40E_VSI_SRIOV) &&
4902             pf->floating_veb_list[user_param]) {
4903                 /* Remove vsi from parent's sibling list */
4904                 if (vsi->parent_vsi == NULL ||
4905                     vsi->parent_vsi->floating_veb == NULL) {
4906                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its floating VEB is NULL");
4907                         return I40E_ERR_PARAM;
4908                 }
4909                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
4910                              &vsi->sib_vsi_list, list);
4911
4912                 /* Remove all switch element of the VSI */
4913                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4914                 if (ret != I40E_SUCCESS)
4915                         PMD_DRV_LOG(ERR, "Failed to delete element");
4916         }
4917
4918         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4919
4920         if (vsi->type != I40E_VSI_SRIOV)
4921                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
4922         rte_free(vsi);
4923
4924         return I40E_SUCCESS;
4925 }
4926
4927 static int
4928 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
4929 {
4930         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4931         struct i40e_aqc_remove_macvlan_element_data def_filter;
4932         struct i40e_mac_filter_info filter;
4933         int ret;
4934
4935         if (vsi->type != I40E_VSI_MAIN)
4936                 return I40E_ERR_CONFIG;
4937         memset(&def_filter, 0, sizeof(def_filter));
4938         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
4939                                         ETH_ADDR_LEN);
4940         def_filter.vlan_tag = 0;
4941         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4942                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4943         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
4944         if (ret != I40E_SUCCESS) {
4945                 struct i40e_mac_filter *f;
4946                 struct ether_addr *mac;
4947
4948                 PMD_DRV_LOG(DEBUG,
4949                             "Cannot remove the default macvlan filter");
4950                 /* The permanent MAC needs to be added to the MAC list instead */
4951                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4952                 if (f == NULL) {
4953                         PMD_DRV_LOG(ERR, "failed to allocate memory");
4954                         return I40E_ERR_NO_MEMORY;
4955                 }
4956                 mac = &f->mac_info.mac_addr;
4957                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
4958                                 ETH_ADDR_LEN);
4959                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4960                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4961                 vsi->mac_num++;
4962
4963                 return ret;
4964         }
4965         rte_memcpy(&filter.mac_addr,
4966                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
4967         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4968         return i40e_vsi_add_mac(vsi, &filter);
4969 }
4970
4971 /*
4972  * i40e_vsi_get_bw_config - Query VSI BW Information
4973  * @vsi: the VSI to be queried
4974  *
4975  * Returns 0 on success, negative value on failure
4976  */
4977 static enum i40e_status_code
4978 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
4979 {
4980         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
4981         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
4982         struct i40e_hw *hw = &vsi->adapter->hw;
4983         i40e_status ret;
4984         int i;
4985         uint32_t bw_max;
4986
4987         memset(&bw_config, 0, sizeof(bw_config));
4988         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4989         if (ret != I40E_SUCCESS) {
4990                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
4991                             hw->aq.asq_last_status);
4992                 return ret;
4993         }
4994
4995         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
4996         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
4997                                         &ets_sla_config, NULL);
4998         if (ret != I40E_SUCCESS) {
4999                 PMD_DRV_LOG(ERR,
5000                         "VSI failed to get TC bandwidth configuration %u",
5001                         hw->aq.asq_last_status);
5002                 return ret;
5003         }
5004
5005         /* store and print out BW info */
5006         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5007         vsi->bw_info.bw_max = bw_config.max_bw;
5008         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5009         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5010         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5011                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5012                      I40E_16_BIT_WIDTH);
5013         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5014                 vsi->bw_info.bw_ets_share_credits[i] =
5015                                 ets_sla_config.share_credits[i];
5016                 vsi->bw_info.bw_ets_credits[i] =
5017                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5018                 /* 4 bits per TC, 4th bit is reserved */
5019                 vsi->bw_info.bw_ets_max[i] =
5020                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5021                                   RTE_LEN2MASK(3, uint8_t));
5022                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5023                             vsi->bw_info.bw_ets_share_credits[i]);
5024                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5025                             vsi->bw_info.bw_ets_credits[i]);
5026                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5027                             vsi->bw_info.bw_ets_max[i]);
5028         }
5029
5030         return I40E_SUCCESS;
5031 }
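
/*
 * Decoding example (illustrative): tc_bw_max packs one 4-bit max-credit
 * field per TC across the two 16-bit AQ words, with the 4th bit of each
 * field reserved. With bw_max == 0x42, TC0 decodes to (0x42 >> 0) & 0x7
 * == 2 and TC1 to (0x42 >> 4) & 0x7 == 4.
 */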
5032
5033 /* i40e_enable_pf_lb
5034  * @pf: pointer to the pf structure
5035  *
5036  * allow loopback on pf
5037  */
5038 static inline void
5039 i40e_enable_pf_lb(struct i40e_pf *pf)
5040 {
5041         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5042         struct i40e_vsi_context ctxt;
5043         int ret;
5044
5045         /* Use the FW API if FW >= v5.0 */
5046         if (hw->aq.fw_maj_ver < 5) {
5047                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5048                 return;
5049         }
5050
5051         memset(&ctxt, 0, sizeof(ctxt));
5052         ctxt.seid = pf->main_vsi_seid;
5053         ctxt.pf_num = hw->pf_id;
5054         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5055         if (ret) {
5056                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5057                             ret, hw->aq.asq_last_status);
5058                 return;
5059         }
5060         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5061         ctxt.info.valid_sections =
5062                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5063         ctxt.info.switch_id |=
5064                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5065
5066         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5067         if (ret)
5068                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5069                             hw->aq.asq_last_status);
5070 }
5071
5072 /* Setup a VSI */
5073 struct i40e_vsi *
5074 i40e_vsi_setup(struct i40e_pf *pf,
5075                enum i40e_vsi_type type,
5076                struct i40e_vsi *uplink_vsi,
5077                uint16_t user_param)
5078 {
5079         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5080         struct i40e_vsi *vsi;
5081         struct i40e_mac_filter_info filter;
5082         int ret;
5083         struct i40e_vsi_context ctxt;
5084         struct ether_addr broadcast =
5085                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5086
5087         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5088             uplink_vsi == NULL) {
5089                 PMD_DRV_LOG(ERR,
5090                         "VSI setup failed, the uplink VSI shouldn't be NULL");
5091                 return NULL;
5092         }
5093
5094         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5095                 PMD_DRV_LOG(ERR,
5096                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5097                 return NULL;
5098         }
5099
5100         /* Two situations:
5101          * 1. The type is not MAIN and the uplink VSI is not NULL:
5102          *    if the uplink VSI has no VEB yet, create one in its veb field.
5103          * 2. The type is SRIOV and the uplink VSI is NULL:
5104          *    if the floating VEB is NULL, create one in the floating_veb field.
5105          */
5106
5107         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5108             uplink_vsi->veb == NULL) {
5109                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5110
5111                 if (uplink_vsi->veb == NULL) {
5112                         PMD_DRV_LOG(ERR, "VEB setup failed");
5113                         return NULL;
5114                 }
5115                 /* set ALLOW_LOOPBACK on the PF when the VEB is created */
5116                 i40e_enable_pf_lb(pf);
5117         }
5118
5119         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5120             pf->main_vsi->floating_veb == NULL) {
5121                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5122
5123                 if (pf->main_vsi->floating_veb == NULL) {
5124                         PMD_DRV_LOG(ERR, "VEB setup failed");
5125                         return NULL;
5126                 }
5127         }
5128
5129         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5130         if (!vsi) {
5131                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5132                 return NULL;
5133         }
5134         TAILQ_INIT(&vsi->mac_list);
5135         vsi->type = type;
5136         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5137         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5138         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5139         vsi->user_param = user_param;
5140         vsi->vlan_anti_spoof_on = 0;
5141         vsi->vlan_filter_on = 0;
5142         /* Allocate queues */
5143         switch (vsi->type) {
5144         case I40E_VSI_MAIN  :
5145                 vsi->nb_qps = pf->lan_nb_qps;
5146                 break;
5147         case I40E_VSI_SRIOV :
5148                 vsi->nb_qps = pf->vf_nb_qps;
5149                 break;
5150         case I40E_VSI_VMDQ2:
5151                 vsi->nb_qps = pf->vmdq_nb_qps;
5152                 break;
5153         case I40E_VSI_FDIR:
5154                 vsi->nb_qps = pf->fdir_nb_qps;
5155                 break;
5156         default:
5157                 goto fail_mem;
5158         }
5159         /*
5160          * The filter status descriptor is reported on RX queue 0,
5161          * while the TX queue for FDIR filter programming has no
5162          * such constraint and can be any queue.
5163          * To keep things simple, the FDIR VSI uses queue pair 0.
5164          * To make sure it gets queue pair 0, queue allocation
5165          * must be done before this function is called.
5166          */
5167         if (type != I40E_VSI_FDIR) {
5168                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5169                 if (ret < 0) {
5170                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5171                                     vsi->seid, ret);
5172                         goto fail_mem;
5173                 }
5174                 vsi->base_queue = ret;
5175         } else
5176                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5177
5178         /* VF has MSIX interrupt in VF range, don't allocate here */
5179         if (type == I40E_VSI_MAIN) {
5180                 ret = i40e_res_pool_alloc(&pf->msix_pool,
5181                                           RTE_MIN(vsi->nb_qps,
5182                                                   RTE_MAX_RXTX_INTR_VEC_ID));
5183                 if (ret < 0) {
5184                         PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
5185                                     vsi->seid, ret);
5186                         goto fail_queue_alloc;
5187                 }
5188                 vsi->msix_intr = ret;
5189                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
5190         } else if (type != I40E_VSI_SRIOV) {
5191                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5192                 if (ret < 0) {
5193                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5194                         goto fail_queue_alloc;
5195                 }
5196                 vsi->msix_intr = ret;
5197                 vsi->nb_msix = 1;
5198         } else {
5199                 vsi->msix_intr = 0;
5200                 vsi->nb_msix = 0;
5201         }
5202
5203         /* Add VSI */
5204         if (type == I40E_VSI_MAIN) {
5205                 /* For the main VSI, no need to add it since it's the default */
5206                 vsi->uplink_seid = pf->mac_seid;
5207                 vsi->seid = pf->main_vsi_seid;
5208                 /* Bind queues with specific MSIX interrupt */
5209                 /**
5210                  * At least two interrupts are needed: one for the misc
5211                  * cause, which is enabled from the OS side, and another
5212                  * for the queues, bound to the device-side interrupt only.
5213                  */
5214
5215                 /* Get default VSI parameters from hardware */
5216                 memset(&ctxt, 0, sizeof(ctxt));
5217                 ctxt.seid = vsi->seid;
5218                 ctxt.pf_num = hw->pf_id;
5219                 ctxt.uplink_seid = vsi->uplink_seid;
5220                 ctxt.vf_num = 0;
5221                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5222                 if (ret != I40E_SUCCESS) {
5223                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5224                         goto fail_msix_alloc;
5225                 }
5226                 rte_memcpy(&vsi->info, &ctxt.info,
5227                         sizeof(struct i40e_aqc_vsi_properties_data));
5228                 vsi->vsi_id = ctxt.vsi_number;
5229                 vsi->info.valid_sections = 0;
5230
5231                 /* Configure TC bandwidth; only TC0 is enabled */
5232                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5233                         I40E_SUCCESS) {
5234                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5235                         goto fail_msix_alloc;
5236                 }
5237
5238                 /* TC, queue mapping */
5239                 memset(&ctxt, 0, sizeof(ctxt));
5240                 vsi->info.valid_sections |=
5241                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5242                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5243                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5244                 rte_memcpy(&ctxt.info, &vsi->info,
5245                         sizeof(struct i40e_aqc_vsi_properties_data));
5246                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5247                                                 I40E_DEFAULT_TCMAP);
5248                 if (ret != I40E_SUCCESS) {
5249                         PMD_DRV_LOG(ERR,
5250                                 "Failed to configure TC queue mapping");
5251                         goto fail_msix_alloc;
5252                 }
5253                 ctxt.seid = vsi->seid;
5254                 ctxt.pf_num = hw->pf_id;
5255                 ctxt.uplink_seid = vsi->uplink_seid;
5256                 ctxt.vf_num = 0;
5257
5258                 /* Update VSI parameters */
5259                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5260                 if (ret != I40E_SUCCESS) {
5261                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5262                         goto fail_msix_alloc;
5263                 }
5264
5265                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5266                                                 sizeof(vsi->info.tc_mapping));
5267                 rte_memcpy(&vsi->info.queue_mapping,
5268                                 &ctxt.info.queue_mapping,
5269                         sizeof(vsi->info.queue_mapping));
5270                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5271                 vsi->info.valid_sections = 0;
5272
5273                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5274                                 ETH_ADDR_LEN);
5275
5276                 /**
5277                  * Updating the default filter settings is necessary to
5278                  * prevent reception of tagged packets.
5279                  * Some old firmware configurations load a default macvlan
5280                  * filter which accepts both tagged and untagged packets;
5281                  * the update replaces it with a normal filter if needed.
5282                  * For NVM 4.2.2 or later this is unnecessary: firmware
5283                  * with the correct configuration loads the expected
5284                  * default macvlan filter, which cannot be removed.
5285                  */
5286                 i40e_update_default_filter_setting(vsi);
5287                 i40e_config_qinq(hw, vsi);
5288         } else if (type == I40E_VSI_SRIOV) {
5289                 memset(&ctxt, 0, sizeof(ctxt));
5290                 /**
5291                  * For other VSIs, the uplink_seid equals the uplink VSI's
5292                  * uplink_seid, since they share the same VEB.
5293                  */
5294                 if (uplink_vsi == NULL)
5295                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5296                 else
5297                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5298                 ctxt.pf_num = hw->pf_id;
5299                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5300                 ctxt.uplink_seid = vsi->uplink_seid;
5301                 ctxt.connection_type = 0x1;
5302                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5303
5304                 /* Use the VEB configuration if FW >= v5.0 */
5305                 if (hw->aq.fw_maj_ver >= 5) {
5306                         /* Configure switch ID */
5307                         ctxt.info.valid_sections |=
5308                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5309                         ctxt.info.switch_id =
5310                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5311                 }
5312
5313                 /* Configure port/vlan */
5314                 ctxt.info.valid_sections |=
5315                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5316                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5317                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5318                                                 hw->func_caps.enabled_tcmap);
5319                 if (ret != I40E_SUCCESS) {
5320                         PMD_DRV_LOG(ERR,
5321                                 "Failed to configure TC queue mapping");
5322                         goto fail_msix_alloc;
5323                 }
5324
5325                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5326                 ctxt.info.valid_sections |=
5327                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5328                 /**
5329                  * Since the VSI is not created yet, only configure the
5330                  * parameters here; the VSI is added below.
5331                  */
5332
5333                 i40e_config_qinq(hw, vsi);
5334         } else if (type == I40E_VSI_VMDQ2) {
5335                 memset(&ctxt, 0, sizeof(ctxt));
5336                 /*
5337                  * For other VSIs, the uplink_seid equals the uplink VSI's
5338                  * uplink_seid, since they share the same VEB.
5339                  */
5340                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5341                 ctxt.pf_num = hw->pf_id;
5342                 ctxt.vf_num = 0;
5343                 ctxt.uplink_seid = vsi->uplink_seid;
5344                 ctxt.connection_type = 0x1;
5345                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5346
5347                 ctxt.info.valid_sections |=
5348                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5349                 /* user_param carries the flag to enable loopback */
5350                 if (user_param) {
5351                         ctxt.info.switch_id =
5352                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5353                         ctxt.info.switch_id |=
5354                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5355                 }
5356
5357                 /* Configure port/vlan */
5358                 ctxt.info.valid_sections |=
5359                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5360                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5361                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5362                                                 I40E_DEFAULT_TCMAP);
5363                 if (ret != I40E_SUCCESS) {
5364                         PMD_DRV_LOG(ERR,
5365                                 "Failed to configure TC queue mapping");
5366                         goto fail_msix_alloc;
5367                 }
5368                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5369                 ctxt.info.valid_sections |=
5370                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5371         } else if (type == I40E_VSI_FDIR) {
5372                 memset(&ctxt, 0, sizeof(ctxt));
5373                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5374                 ctxt.pf_num = hw->pf_id;
5375                 ctxt.vf_num = 0;
5376                 ctxt.uplink_seid = vsi->uplink_seid;
5377                 ctxt.connection_type = 0x1;     /* regular data port */
5378                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5379                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5380                                                 I40E_DEFAULT_TCMAP);
5381                 if (ret != I40E_SUCCESS) {
5382                         PMD_DRV_LOG(ERR,
5383                                 "Failed to configure TC queue mapping.");
5384                         goto fail_msix_alloc;
5385                 }
5386                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5387                 ctxt.info.valid_sections |=
5388                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5389         } else {
5390                 PMD_DRV_LOG(ERR, "VSI: Other VSI types are not supported yet");
5391                 goto fail_msix_alloc;
5392         }
5393
5394         if (vsi->type != I40E_VSI_MAIN) {
5395                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5396                 if (ret != I40E_SUCCESS) {
5397                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5398                                     hw->aq.asq_last_status);
5399                         goto fail_msix_alloc;
5400                 }
5401                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5402                 vsi->info.valid_sections = 0;
5403                 vsi->seid = ctxt.seid;
5404                 vsi->vsi_id = ctxt.vsi_number;
5405                 vsi->sib_vsi_list.vsi = vsi;
5406                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5407                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5408                                           &vsi->sib_vsi_list, list);
5409                 } else {
5410                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5411                                           &vsi->sib_vsi_list, list);
5412                 }
5413         }
5414
5415         /* MAC/VLAN configuration */
5416         rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5417         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5418
5419         ret = i40e_vsi_add_mac(vsi, &filter);
5420         if (ret != I40E_SUCCESS) {
5421                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5422                 goto fail_msix_alloc;
5423         }
5424
5425         /* Get VSI BW information */
5426         i40e_vsi_get_bw_config(vsi);
5427         return vsi;
5428 fail_msix_alloc:
5429         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5430 fail_queue_alloc:
5431         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5432 fail_mem:
5433         rte_free(vsi);
5434         return NULL;
5435 }
5436
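/*
 * Illustrative i40e_vsi_setup() call patterns, mirroring the real uses
 * in i40e_pf_setup() and i40e_vmdq_setup() below (the last argument is
 * the caller-supplied user_param; enable_loop_back and vf_index stand
 * in for whatever value the caller passes):
 *   i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
 *   i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi, enable_loop_back);
 *   i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->main_vsi, vf_index);
 */
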
5437 /* Configure vlan filter on or off */
5438 int
5439 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5440 {
5441         int i, num;
5442         struct i40e_mac_filter *f;
5443         void *temp;
5444         struct i40e_mac_filter_info *mac_filter;
5445         enum rte_mac_filter_type desired_filter;
5446         int ret = I40E_SUCCESS;
5447
5448         if (on) {
5449                 /* Filter to match MAC and VLAN */
5450                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5451         } else {
5452                 /* Filter to match only MAC */
5453                 desired_filter = RTE_MAC_PERFECT_MATCH;
5454         }
5455
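        /*
         * The match type of an installed filter cannot be changed in
         * place here: snapshot every MAC filter, delete it, and re-add
         * it below with the desired type.
         */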
5456         num = vsi->mac_num;
5457
5458         mac_filter = rte_zmalloc("mac_filter_info_data",
5459                                  num * sizeof(*mac_filter), 0);
5460         if (mac_filter == NULL) {
5461                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5462                 return I40E_ERR_NO_MEMORY;
5463         }
5464
5465         i = 0;
5466
5467         /* Remove all existing MAC filters */
5468         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5469                 mac_filter[i] = f->mac_info;
5470                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5471                 if (ret) {
5472                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5473                                     on ? "enable" : "disable");
5474                         goto DONE;
5475                 }
5476                 i++;
5477         }
5478
5479         /* Re-add all filters with the desired match type */
5480         for (i = 0; i < num; i++) {
5481                 mac_filter[i].filter_type = desired_filter;
5482                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5483                 if (ret) {
5484                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5485                                     on ? "enable" : "disable");
5486                         goto DONE;
5487                 }
5488         }
5489
5490 DONE:
5491         rte_free(mac_filter);
5492         return ret;
5493 }
5494
5495 /* Configure vlan stripping on or off */
5496 int
5497 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5498 {
5499         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5500         struct i40e_vsi_context ctxt;
5501         uint8_t vlan_flags;
5502         int ret = I40E_SUCCESS;
5503
5504         /* Check if it is already on or off */
5505         if (vsi->info.valid_sections &
5506                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5507                 if (on) {
5508                         if ((vsi->info.port_vlan_flags &
5509                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5510                                 return 0; /* already on */
5511                 } else {
5512                         if ((vsi->info.port_vlan_flags &
5513                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5514                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
5515                                 return 0; /* already off */
5516                 }
5517         }
5518
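        /*
         * EMOD selects the VLAN stripping emulation mode: STR_BOTH
         * enables stripping on receive, while NOTHING leaves the tag
         * in the packet.
         */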
5519         if (on)
5520                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5521         else
5522                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5523         vsi->info.valid_sections =
5524                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5525         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5526         vsi->info.port_vlan_flags |= vlan_flags;
5527         ctxt.seid = vsi->seid;
5528         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5529         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5530         if (ret)
5531                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
5532                             on ? "enable" : "disable");
5533
5534         return ret;
5535 }
5536
5537 static int
5538 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5539 {
5540         struct rte_eth_dev_data *data = dev->data;
5541         int ret;
5542         int mask = 0;
5543
5544         /* Apply vlan offload setting */
5545         mask = ETH_VLAN_STRIP_MASK |
5546                ETH_VLAN_FILTER_MASK |
5547                ETH_VLAN_EXTEND_MASK;
5548         ret = i40e_vlan_offload_set(dev, mask);
5549         if (ret) {
5550                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
5551                 return ret;
5552         }
5553
5554         /* Apply pvid setting */
5555         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5556                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
5557         if (ret)
5558                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
5559
5560         return ret;
5561 }
5562
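/*
 * Enable or disable double VLAN (QinQ) handling on the port via the
 * set-port-parameters admin queue command.
 */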
5563 static int
5564 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5565 {
5566         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5567
5568         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5569 }
5570
5571 static int
5572 i40e_update_flow_control(struct i40e_hw *hw)
5573 {
5574 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5575         struct i40e_link_status link_status;
5576         uint32_t rxfc = 0, txfc = 0, reg;
5577         uint8_t an_info;
5578         int ret;
5579
5580         memset(&link_status, 0, sizeof(link_status));
5581         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5582         if (ret != I40E_SUCCESS) {
5583                 PMD_DRV_LOG(ERR, "Failed to get link status information");
5584                 goto write_reg; /* Disable flow control */
5585         }
5586
5587         an_info = hw->phy.link_info.an_info;
5588         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5589                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5590                 ret = I40E_ERR_NOT_READY;
5591                 goto write_reg; /* Disable flow control */
5592         }
5593         /**
5594          * If link auto-negotiation is enabled, flow control needs to
5595          * be configured according to the negotiated pause capabilities.
5596          */
5597         switch (an_info & I40E_LINK_PAUSE_RXTX) {
5598         case I40E_LINK_PAUSE_RXTX:
5599                 rxfc = 1;
5600                 txfc = 1;
5601                 hw->fc.current_mode = I40E_FC_FULL;
5602                 break;
5603         case I40E_AQ_LINK_PAUSE_RX:
5604                 rxfc = 1;
5605                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
5606                 break;
5607         case I40E_AQ_LINK_PAUSE_TX:
5608                 txfc = 1;
5609                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
5610                 break;
5611         default:
5612                 hw->fc.current_mode = I40E_FC_NONE;
5613                 break;
5614         }
5615
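        /*
         * TX pause is programmed through PRTDCB_FCCFG.TFCE and RX pause
         * through PRTDCB_MFLCN.RFCE; on any failure above, rxfc/txfc
         * stay zero, which disables flow control in both directions.
         */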
5616 write_reg:
5617         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
5618                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
5619         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
5620         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
5621         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
5622         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
5623
5624         return ret;
5625 }
5626
5627 /* PF setup */
5628 static int
5629 i40e_pf_setup(struct i40e_pf *pf)
5630 {
5631         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5632         struct i40e_filter_control_settings settings;
5633         struct i40e_vsi *vsi;
5634         int ret;
5635
5636         /* Clear all stats counters */
5637         pf->offset_loaded = FALSE;
5638         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
5639         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
5640         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
5641         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
5642
5643         ret = i40e_pf_get_switch_config(pf);
5644         if (ret != I40E_SUCCESS) {
5645                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
5646                 return ret;
5647         }
5648         if (pf->flags & I40E_FLAG_FDIR) {
5649                 /* Allocate the queue first so that FDIR uses queue pair 0 */
5650                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
5651                 if (ret != I40E_FDIR_QUEUE_ID) {
5652                         PMD_DRV_LOG(ERR,
5653                                 "queue allocation failed for FDIR: ret=%d",
5654                                 ret);
5655                         pf->flags &= ~I40E_FLAG_FDIR;
5656                 }
5657         }
5658         /* Main VSI setup */
5659         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
5660         if (!vsi) {
5661                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
5662                 return I40E_ERR_NOT_READY;
5663         }
5664         pf->main_vsi = vsi;
5665
5666         /* Configure filter control */
5667         memset(&settings, 0, sizeof(settings));
5668         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
5669                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
5670         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
5671                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
5672         else {
5673                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
5674                         hw->func_caps.rss_table_size);
5675                 return I40E_ERR_PARAM;
5676         }
5677         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
5678                 hw->func_caps.rss_table_size);
5679         pf->hash_lut_size = hw->func_caps.rss_table_size;
5680
5681         /* Enable ethtype and macvlan filters */
5682         settings.enable_ethtype = TRUE;
5683         settings.enable_macvlan = TRUE;
5684         ret = i40e_set_filter_control(hw, &settings);
5685         if (ret)
5686                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
5687                                                                 ret);
5688
5689         /* Update flow control according to the auto-negotiation result */
5690         i40e_update_flow_control(hw);
5691
5692         return I40E_SUCCESS;
5693 }
5694
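/*
 * Enable or disable a TX queue. The driver sets or clears QENA_REQ in
 * QTX_ENA and the hardware acknowledges by updating QENA_STAT; each
 * polling loop below waits for the two bits to agree, bounded by
 * I40E_CHK_Q_ENA_COUNT polls of I40E_CHK_Q_ENA_INTERVAL_US each.
 */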
5695 int
5696 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5697 {
5698         uint32_t reg;
5699         uint16_t j;
5700
5701         /**
5702          * Set or clear TX Queue Disable flags,
5703          * as required by hardware.
5704          */
5705         i40e_pre_tx_queue_cfg(hw, q_idx, on);
5706         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
5707
5708         /* Wait until the request is finished */
5709         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5710                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5711                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5712                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5713                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
5714                                                         & 0x1))) {
5715                         break;
5716                 }
5717         }
5718         if (on) {
5719                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
5720                         return I40E_SUCCESS; /* already on, skip next steps */
5721
5722                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
5723                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
5724         } else {
5725                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5726                         return I40E_SUCCESS; /* already off, skip next steps */
5727                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
5728         }
5729         /* Write the register */
5730         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
5731         /* Check the result */
5732         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5733                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5734                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5735                 if (on) {
5736                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5737                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
5738                                 break;
5739                 } else {
5740                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5741                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5742                                 break;
5743                 }
5744         }
5745         /* Check for timeout */
5746         if (j >= I40E_CHK_Q_ENA_COUNT) {
5747                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5748                             (on ? "enable" : "disable"), q_idx);
5749                 return I40E_ERR_TIMEOUT;
5750         }
5751
5752         return I40E_SUCCESS;
5753 }
5754
5755 /* Switch on or off the TX queues */
5756 static int
5757 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5758 {
5759         struct rte_eth_dev_data *dev_data = pf->dev_data;
5760         struct i40e_tx_queue *txq;
5761         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5762         uint16_t i;
5763         int ret;
5764
5765         for (i = 0; i < dev_data->nb_tx_queues; i++) {
5766                 txq = dev_data->tx_queues[i];
5767                 /* Skip queues that are not configured, or that are
5768                  * flagged for deferred (per-queue) start */
5769                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5770                         continue;
5771                 if (on)
5772                         ret = i40e_dev_tx_queue_start(dev, i);
5773                 else
5774                         ret = i40e_dev_tx_queue_stop(dev, i);
5775                 if (ret != I40E_SUCCESS)
5776                         return ret;
5777         }
5778
5779         return I40E_SUCCESS;
5780 }
5781
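/*
 * Enable or disable an RX queue using the same QENA_REQ/QENA_STAT
 * handshake as the TX path, without the pre-configuration step.
 */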
5782 int
5783 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5784 {
5785         uint32_t reg;
5786         uint16_t j;
5787
5788         /* Wait until the request is finished */
5789         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5790                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5791                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5792                 if (!(((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5793                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1)))
5794                         break;
5795         }
5796
5797         if (on) {
5798                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5799                         return I40E_SUCCESS; /* Already on, skip next steps */
5800                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5801         } else {
5802                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5803                         return I40E_SUCCESS; /* Already off, skip next steps */
5804                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5805         }
5806
5807         /* Write the register */
5808         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5809         /* Check the result */
5810         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5811                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5812                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5813                 if (on) {
5814                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5815                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
5816                                 break;
5817                 } else {
5818                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5819                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5820                                 break;
5821                 }
5822         }
5823
5824         /* Check for timeout */
5825         if (j >= I40E_CHK_Q_ENA_COUNT) {
5826                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
5827                             (on ? "enable" : "disable"), q_idx);
5828                 return I40E_ERR_TIMEOUT;
5829         }
5830
5831         return I40E_SUCCESS;
5832 }
5833 /* Switch on or off the rx queues */
5834 static int
5835 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
5836 {
5837         struct rte_eth_dev_data *dev_data = pf->dev_data;
5838         struct i40e_rx_queue *rxq;
5839         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5840         uint16_t i;
5841         int ret;
5842
5843         for (i = 0; i < dev_data->nb_rx_queues; i++) {
5844                 rxq = dev_data->rx_queues[i];
5845                 /* Skip queues that are not configured, or that are
5846                  * flagged for deferred (per-queue) start */
5847                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
5848                         continue;
5849                 if (on)
5850                         ret = i40e_dev_rx_queue_start(dev, i);
5851                 else
5852                         ret = i40e_dev_rx_queue_stop(dev, i);
5853                 if (ret != I40E_SUCCESS)
5854                         return ret;
5855         }
5856
5857         return I40E_SUCCESS;
5858 }
5859
5860 /* Switch on or off all the rx/tx queues */
5861 int
5862 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
5863 {
5864         int ret;
5865
5866         if (on) {
5867                 /* enable rx queues before enabling tx queues */
5868                 ret = i40e_dev_switch_rx_queues(pf, on);
5869                 if (ret) {
5870                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
5871                         return ret;
5872                 }
5873                 ret = i40e_dev_switch_tx_queues(pf, on);
5874         } else {
5875                 /* Stop tx queues before stopping rx queues */
5876                 ret = i40e_dev_switch_tx_queues(pf, on);
5877                 if (ret) {
5878                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
5879                         return ret;
5880                 }
5881                 ret = i40e_dev_switch_rx_queues(pf, on);
5882         }
5883
5884         return ret;
5885 }
5886
5887 /* Initialize VSI for TX */
5888 static int
5889 i40e_dev_tx_init(struct i40e_pf *pf)
5890 {
5891         struct rte_eth_dev_data *data = pf->dev_data;
5892         uint16_t i;
5893         int ret = I40E_SUCCESS;
5894         struct i40e_tx_queue *txq;
5895
5896         for (i = 0; i < data->nb_tx_queues; i++) {
5897                 txq = data->tx_queues[i];
5898                 if (!txq || !txq->q_set)
5899                         continue;
5900                 ret = i40e_tx_queue_init(txq);
5901                 if (ret != I40E_SUCCESS)
5902                         break;
5903         }
5904         if (ret == I40E_SUCCESS)
5905                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
5906                                      ->eth_dev);
5907
5908         return ret;
5909 }
5910
5911 /* Initialize VSI for RX */
5912 static int
5913 i40e_dev_rx_init(struct i40e_pf *pf)
5914 {
5915         struct rte_eth_dev_data *data = pf->dev_data;
5916         int ret = I40E_SUCCESS;
5917         uint16_t i;
5918         struct i40e_rx_queue *rxq;
5919
5920         i40e_pf_config_mq_rx(pf);
5921         for (i = 0; i < data->nb_rx_queues; i++) {
5922                 rxq = data->rx_queues[i];
5923                 if (!rxq || !rxq->q_set)
5924                         continue;
5925
5926                 ret = i40e_rx_queue_init(rxq);
5927                 if (ret != I40E_SUCCESS) {
5928                         PMD_DRV_LOG(ERR,
5929                                 "Failed to do RX queue initialization");
5930                         break;
5931                 }
5932         }
5933         if (ret == I40E_SUCCESS)
5934                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
5935                                      ->eth_dev);
5936
5937         return ret;
5938 }
5939
5940 static int
5941 i40e_dev_rxtx_init(struct i40e_pf *pf)
5942 {
5943         int err;
5944
5945         err = i40e_dev_tx_init(pf);
5946         if (err) {
5947                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
5948                 return err;
5949         }
5950         err = i40e_dev_rx_init(pf);
5951         if (err) {
5952                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
5953                 return err;
5954         }
5955
5956         return err;
5957 }
5958
5959 static int
5960 i40e_vmdq_setup(struct rte_eth_dev *dev)
5961 {
5962         struct rte_eth_conf *conf = &dev->data->dev_conf;
5963         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5964         int i, err, conf_vsis, j, loop;
5965         struct i40e_vsi *vsi;
5966         struct i40e_vmdq_info *vmdq_info;
5967         struct rte_eth_vmdq_rx_conf *vmdq_conf;
5968         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5969
5970         /*
5971          * Disable interrupts to avoid messages from VFs. This also
5972          * avoids race conditions during VSI creation/destruction.
5973          */
5974         i40e_pf_disable_irq0(hw);
5975
5976         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
5977                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
5978                 return -ENOTSUP;
5979         }
5980
5981         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
5982         if (conf_vsis > pf->max_nb_vmdq_vsi) {
5983                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support: %u",
5984                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
5985                         pf->max_nb_vmdq_vsi);
5986                 return -ENOTSUP;
5987         }
5988
5989         if (pf->vmdq != NULL) {
5990                 PMD_INIT_LOG(INFO, "VMDQ already configured");
5991                 return 0;
5992         }
5993
5994         pf->vmdq = rte_zmalloc("vmdq_info_struct",
5995                                 sizeof(*vmdq_info) * conf_vsis, 0);
5996
5997         if (pf->vmdq == NULL) {
5998                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
5999                 return -ENOMEM;
6000         }
6001
6002         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6003
6004         /* Create VMDQ VSI */
6005         for (i = 0; i < conf_vsis; i++) {
6006                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6007                                 vmdq_conf->enable_loop_back);
6008                 if (vsi == NULL) {
6009                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6010                         err = -1;
6011                         goto err_vsi_setup;
6012                 }
6013                 vmdq_info = &pf->vmdq[i];
6014                 vmdq_info->pf = pf;
6015                 vmdq_info->vsi = vsi;
6016         }
6017         pf->nb_cfg_vmdq_vsi = conf_vsis;
6018
6019         /* Configure VLANs */
6020         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6021         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6022                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6023                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6024                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6025                                         vmdq_conf->pool_map[i].vlan_id, j);
6026
6027                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6028                                                 vmdq_conf->pool_map[i].vlan_id);
6029                                 if (err) {
6030                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6031                                         err = -1;
6032                                         goto err_vsi_setup;
6033                                 }
6034                         }
6035                 }
6036         }
6037
6038         i40e_pf_enable_irq0(hw);
6039
6040         return 0;
6041
6042 err_vsi_setup:
6043         for (i = 0; i < conf_vsis; i++)
6044                 if (pf->vmdq[i].vsi == NULL)
6045                         break;
6046                 else
6047                         i40e_vsi_release(pf->vmdq[i].vsi);
6048
6049         rte_free(pf->vmdq);
6050         pf->vmdq = NULL;
6051         i40e_pf_enable_irq0(hw);
6052         return err;
6053 }
6054
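/*
 * Compute the delta of a 32-bit hardware counter against its loaded
 * offset, tolerating a single wrap: e.g. offset 0xFFFFFFF0 and
 * new_data 0x10 yield (0x10 + 2^32) - 0xFFFFFFF0 = 0x20.
 */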
6055 static void
6056 i40e_stat_update_32(struct i40e_hw *hw,
6057                    uint32_t reg,
6058                    bool offset_loaded,
6059                    uint64_t *offset,
6060                    uint64_t *stat)
6061 {
6062         uint64_t new_data;
6063
6064         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6065         if (!offset_loaded)
6066                 *offset = new_data;
6067
6068         if (new_data >= *offset)
6069                 *stat = (uint64_t)(new_data - *offset);
6070         else
6071                 *stat = (uint64_t)((new_data +
6072                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6073 }
6074
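/*
 * 48-bit counters are split across two registers: the low 32 bits in
 * loreg and the high 16 bits in hireg. The wrap correction uses 2^48
 * and the final mask keeps the result within 48 bits.
 */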
6075 static void
6076 i40e_stat_update_48(struct i40e_hw *hw,
6077                    uint32_t hireg,
6078                    uint32_t loreg,
6079                    bool offset_loaded,
6080                    uint64_t *offset,
6081                    uint64_t *stat)
6082 {
6083         uint64_t new_data;
6084
6085         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6086         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6087                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6088
6089         if (!offset_loaded)
6090                 *offset = new_data;
6091
6092         if (new_data >= *offset)
6093                 *stat = new_data - *offset;
6094         else
6095                 *stat = (uint64_t)((new_data +
6096                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6097
6098         *stat &= I40E_48_BIT_MASK;
6099 }
6100
6101 /* Disable IRQ0 */
6102 void
6103 i40e_pf_disable_irq0(struct i40e_hw *hw)
6104 {
6105         /* Disable all interrupt types */
6106         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
6107         I40E_WRITE_FLUSH(hw);
6108 }
6109
6110 /* Enable IRQ0 */
6111 void
6112 i40e_pf_enable_irq0(struct i40e_hw *hw)
6113 {
6114         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6115                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6116                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6117                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6118         I40E_WRITE_FLUSH(hw);
6119 }
6120
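/*
 * Configure the PF "other cause" interrupt: disable irq0 first, enable
 * the causes in I40E_PFINT_ICR0_ENA_MASK, and optionally mark the irq0
 * cause list as empty so that no queue is linked to it.
 */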
6121 static void
6122 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6123 {
6124         /* read pending request and disable first */
6125         i40e_pf_disable_irq0(hw);
6126         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6127         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6128                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6129
6130         if (no_queue)
6131                 /* Do not link any queue to irq0 */
6132                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6133                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6134 }
6135
6136 static void
6137 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6138 {
6139         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6140         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6141         int i;
6142         uint16_t abs_vf_id;
6143         uint32_t index, offset, val;
6144
6145         if (!pf->vfs)
6146                 return;
6147         /**
6148          * Try to find which VF triggered a reset. Use the absolute VF id,
6149          * since the register is a global one.
6150          */
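        /*
         * Example: absolute VF id 40 maps to VFLRSTAT register index
         * 40 / 32 = 1, bit offset 40 % 32 = 8.
         */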
6151         for (i = 0; i < pf->vf_num; i++) {
6152                 abs_vf_id = hw->func_caps.vf_base_id + i;
6153                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6154                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6155                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6156                 /* VFR event occurred */
6157                 if (val & (0x1 << offset)) {
6158                         int ret;
6159
6160                         /* Clear the event first */
6161                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6162                                                         (0x1 << offset));
6163                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6164                         /**
6165                          * Only notify that a VF reset event occurred;
6166                          * don't trigger another SW reset.
6167                          */
6168                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6169                         if (ret != I40E_SUCCESS)
6170                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6171                 }
6172         }
6173 }
6174
6175 static void
6176 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6177 {
6178         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6179         int i;
6180
6181         for (i = 0; i < pf->vf_num; i++)
6182                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6183 }
6184
6185 static void
6186 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6187 {
6188         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6189         struct i40e_arq_event_info info;
6190         uint16_t pending, opcode;
6191         int ret;
6192
6193         info.buf_len = I40E_AQ_BUF_SZ;
6194         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6195         if (!info.msg_buf) {
6196                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6197                 return;
6198         }
6199
6200         pending = 1;
6201         while (pending) {
6202                 ret = i40e_clean_arq_element(hw, &info, &pending);
6203
6204                 if (ret != I40E_SUCCESS) {
6205                         PMD_DRV_LOG(INFO,
6206                                 "Failed to read msg from AdminQ, aq_err: %u",
6207                                 hw->aq.asq_last_status);
6208                         break;
6209                 }
6210                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6211
6212                 switch (opcode) {
6213                 case i40e_aqc_opc_send_msg_to_pf:
6214                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6215                         i40e_pf_host_handle_vf_msg(dev,
6216                                         rte_le_to_cpu_16(info.desc.retval),
6217                                         rte_le_to_cpu_32(info.desc.cookie_high),
6218                                         rte_le_to_cpu_32(info.desc.cookie_low),
6219                                         info.msg_buf,
6220                                         info.msg_len);
6221                         break;
6222                 case i40e_aqc_opc_get_link_status:
6223                         ret = i40e_dev_link_update(dev, 0);
6224                         if (!ret)
6225                                 _rte_eth_dev_callback_process(dev,
6226                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6227                         break;
6228                 default:
6229                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6230                                     opcode);
6231                         break;
6232                 }
6233         }
6234         rte_free(info.msg_buf);
6235 }
6236
6237 /**
6238  * Interrupt handler triggered by the NIC for handling
6239  * specific interrupts.
6240  *
6241  * @param param
6242  *  The address of the parameter (struct rte_eth_dev *) registered
6243  *  before.
6244  *
6245  * @return
6246  *  void
6247  */
6249 static void
6250 i40e_dev_interrupt_handler(void *param)
6251 {
6252         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6253         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6254         uint32_t icr0;
6255
6256         /* Disable interrupt */
6257         i40e_pf_disable_irq0(hw);
6258
6259         /* read out interrupt causes */
6260         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6261
6262         /* No interrupt event indicated */
6263         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6264                 PMD_DRV_LOG(INFO, "No interrupt event");
6265                 goto done;
6266         }
6267         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6268                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6269         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6270                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6271         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6272                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6273         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6274                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6275         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6276                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6277         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6278                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6279         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6280                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6281
6282         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6283                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6284                 i40e_dev_handle_vfr_event(dev);
6285         }
6286         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6287                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6288                 i40e_dev_handle_aq_msg(dev);
6289         }
6290
6291 done:
6292         /* Enable interrupt */
6293         i40e_pf_enable_irq0(hw);
6294         rte_intr_enable(dev->intr_handle);
6295 }
6296
6297 int
6298 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6299                          struct i40e_macvlan_filter *filter,
6300                          int total)
6301 {
6302         int ele_num, ele_buff_size;
6303         int num, actual_num, i;
6304         uint16_t flags;
6305         int ret = I40E_SUCCESS;
6306         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6307         struct i40e_aqc_add_macvlan_element_data *req_list;
6308
6309         if (filter == NULL || total == 0)
6310                 return I40E_ERR_PARAM;
6311         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6312         ele_buff_size = hw->aq.asq_buf_size;
6313
6314         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6315         if (req_list == NULL) {
6316                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6317                 return I40E_ERR_NO_MEMORY;
6318         }
6319
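        /*
         * The admin queue buffer holds at most ele_num elements, so the
         * filters are submitted in chunks of up to ele_num entries until
         * all 'total' of them have been added.
         */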
6320         num = 0;
6321         do {
6322                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6323                 memset(req_list, 0, ele_buff_size);
6324
6325                 for (i = 0; i < actual_num; i++) {
6326                         rte_memcpy(req_list[i].mac_addr,
6327                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6328                         req_list[i].vlan_tag =
6329                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6330
6331                         switch (filter[num + i].filter_type) {
6332                         case RTE_MAC_PERFECT_MATCH:
6333                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6334                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6335                                 break;
6336                         case RTE_MACVLAN_PERFECT_MATCH:
6337                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6338                                 break;
6339                         case RTE_MAC_HASH_MATCH:
6340                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6341                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6342                                 break;
6343                         case RTE_MACVLAN_HASH_MATCH:
6344                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6345                                 break;
6346                         default:
6347                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
6348                                 ret = I40E_ERR_PARAM;
6349                                 goto DONE;
6350                         }
6351
6352                         req_list[i].queue_number = 0;
6353
6354                         req_list[i].flags = rte_cpu_to_le_16(flags);
6355                 }
6356
6357                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6358                                                 actual_num, NULL);
6359                 if (ret != I40E_SUCCESS) {
6360                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6361                         goto DONE;
6362                 }
6363                 num += actual_num;
6364         } while (num < total);
6365
6366 DONE:
6367         rte_free(req_list);
6368         return ret;
6369 }
6370
6371 int
6372 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6373                             struct i40e_macvlan_filter *filter,
6374                             int total)
6375 {
6376         int ele_num, ele_buff_size;
6377         int num, actual_num, i;
6378         uint16_t flags;
6379         int ret = I40E_SUCCESS;
6380         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6381         struct i40e_aqc_remove_macvlan_element_data *req_list;
6382
6383         if (filter == NULL || total == 0)
6384                 return I40E_ERR_PARAM;
6385
6386         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6387         ele_buff_size = hw->aq.asq_buf_size;
6388
6389         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6390         if (req_list == NULL) {
6391                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6392                 return I40E_ERR_NO_MEMORY;
6393         }
6394
6395         num = 0;
6396         do {
6397                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6398                 memset(req_list, 0, ele_buff_size);
6399
6400                 for (i = 0; i < actual_num; i++) {
6401                         rte_memcpy(req_list[i].mac_addr,
6402                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6403                         req_list[i].vlan_tag =
6404                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6405
6406                         switch (filter[num + i].filter_type) {
6407                         case RTE_MAC_PERFECT_MATCH:
6408                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6409                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6410                                 break;
6411                         case RTE_MACVLAN_PERFECT_MATCH:
6412                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6413                                 break;
6414                         case RTE_MAC_HASH_MATCH:
6415                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6416                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6417                                 break;
6418                         case RTE_MACVLAN_HASH_MATCH:
6419                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6420                                 break;
6421                         default:
6422                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6423                                 ret = I40E_ERR_PARAM;
6424                                 goto DONE;
6425                         }
6426                         req_list[i].flags = rte_cpu_to_le_16(flags);
6427                 }
6428
6429                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6430                                                 actual_num, NULL);
6431                 if (ret != I40E_SUCCESS) {
6432                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6433                         goto DONE;
6434                 }
6435                 num += actual_num;
6436         } while (num < total);
6437
6438 DONE:
6439         rte_free(req_list);
6440         return ret;
6441 }
6442
6443 /* Find out specific MAC filter */
6444 static struct i40e_mac_filter *
6445 i40e_find_mac_filter(struct i40e_vsi *vsi,
6446                          struct ether_addr *macaddr)
6447 {
6448         struct i40e_mac_filter *f;
6449
6450         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6451                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6452                         return f;
6453         }
6454
6455         return NULL;
6456 }
6457
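/*
 * The VFTA is a bitmap over the VLAN id space: I40E_VFTA_IDX selects
 * the 32-bit word and I40E_VFTA_BIT the bit within that word.
 */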
6458 static bool
6459 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6460                          uint16_t vlan_id)
6461 {
6462         uint32_t vid_idx, vid_bit;
6463
6464         if (vlan_id > ETH_VLAN_ID_MAX)
6465                 return 0;
6466
6467         vid_idx = I40E_VFTA_IDX(vlan_id);
6468         vid_bit = I40E_VFTA_BIT(vlan_id);
6469
6470         if (vsi->vfta[vid_idx] & vid_bit)
6471                 return 1;
6472         else
6473                 return 0;
6474 }
6475
6476 static void
6477 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6478                        uint16_t vlan_id, bool on)
6479 {
6480         uint32_t vid_idx, vid_bit;
6481
6482         vid_idx = I40E_VFTA_IDX(vlan_id);
6483         vid_bit = I40E_VFTA_BIT(vlan_id);
6484
6485         if (on)
6486                 vsi->vfta[vid_idx] |= vid_bit;
6487         else
6488                 vsi->vfta[vid_idx] &= ~vid_bit;
6489 }
6490
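/*
 * Update the software VFTA shadow and, when VLAN anti-spoofing or
 * filtering is active on the VSI, mirror the change into hardware via
 * the add/remove VLAN admin queue commands.
 */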
6491 void
6492 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6493                      uint16_t vlan_id, bool on)
6494 {
6495         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6496         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6497         int ret;
6498
6499         if (vlan_id > ETH_VLAN_ID_MAX)
6500                 return;
6501
6502         i40e_store_vlan_filter(vsi, vlan_id, on);
6503
6504         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6505                 return;
6506
6507         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6508
6509         if (on) {
6510                 ret = i40e_aq_add_vlan(hw, vsi->seid,
6511                                        &vlan_data, 1, NULL);
6512                 if (ret != I40E_SUCCESS)
6513                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6514         } else {
6515                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
6516                                           &vlan_data, 1, NULL);
6517                 if (ret != I40E_SUCCESS)
6518                         PMD_DRV_LOG(ERR,
6519                                     "Failed to remove vlan filter");
6520         }
6521 }
6522
6523 /**
6524  * Find all vlan options for specific mac addr,
6525  * return with actual vlan found.
6526  */
6527 int
6528 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6529                            struct i40e_macvlan_filter *mv_f,
6530                            int num, struct ether_addr *addr)
6531 {
6532         int i;
6533         uint32_t j, k;
6534
6535         /**
6536          * i40e_find_vlan_filter is deliberately not used here, to
6537          * reduce the loop time, although the code looks complex.
6538          */
6539         if (num < vsi->vlan_num)
6540                 return I40E_ERR_PARAM;
6541
6542         i = 0;
6543         for (j = 0; j < I40E_VFTA_SIZE; j++) {
6544                 if (vsi->vfta[j]) {
6545                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6546                                 if (vsi->vfta[j] & (1 << k)) {
6547                                         if (i > num - 1) {
6548                                                 PMD_DRV_LOG(ERR,
6549                                                         "vlan number doesn't match");
6550                                                 return I40E_ERR_PARAM;
6551                                         }
6552                                         rte_memcpy(&mv_f[i].macaddr,
6553                                                         addr, ETH_ADDR_LEN);
6554                                         mv_f[i].vlan_id =
6555                                                 j * I40E_UINT32_BIT_SIZE + k;
6556                                         i++;
6557                                 }
6558                         }
6559                 }
6560         }
6561         return I40E_SUCCESS;
6562 }
6563
6564 static inline int
6565 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
6566                            struct i40e_macvlan_filter *mv_f,
6567                            int num,
6568                            uint16_t vlan)
6569 {
6570         int i = 0;
6571         struct i40e_mac_filter *f;
6572
6573         if (num < vsi->mac_num)
6574                 return I40E_ERR_PARAM;
6575
6576         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6577                 if (i > num - 1) {
6578                         PMD_DRV_LOG(ERR, "buffer number does not match");
6579                         return I40E_ERR_PARAM;
6580                 }
6581                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6582                                 ETH_ADDR_LEN);
6583                 mv_f[i].vlan_id = vlan;
6584                 mv_f[i].filter_type = f->mac_info.filter_type;
6585                 i++;
6586         }
6587
6588         return I40E_SUCCESS;
6589 }
6590
6591 static int
6592 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
6593 {
6594         int i, j, num;
6595         struct i40e_mac_filter *f;
6596         struct i40e_macvlan_filter *mv_f;
6597         int ret = I40E_SUCCESS;
6598
6599         if (vsi == NULL || vsi->mac_num == 0)
6600                 return I40E_ERR_PARAM;
6601
6602         /* Case where no VLAN is set */
6603         if (vsi->vlan_num == 0)
6604                 num = vsi->mac_num;
6605         else
6606                 num = vsi->mac_num * vsi->vlan_num;
6607
6608         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
6609         if (mv_f == NULL) {
6610                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6611                 return I40E_ERR_NO_MEMORY;
6612         }
6613
6614         i = 0;
6615         if (vsi->vlan_num == 0) {
6616                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6617                         rte_memcpy(&mv_f[i].macaddr,
6618                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
6619                         mv_f[i].filter_type = f->mac_info.filter_type;
6620                         mv_f[i].vlan_id = 0;
6621                         i++;
6622                 }
6623         } else {
6624                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6625                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
6626                                         vsi->vlan_num, &f->mac_info.mac_addr);
6627                         if (ret != I40E_SUCCESS)
6628                                 goto DONE;
6629                         for (j = i; j < i + vsi->vlan_num; j++)
6630                                 mv_f[j].filter_type = f->mac_info.filter_type;
6631                         i += vsi->vlan_num;
6632                 }
6633         }
6634
6635         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
6636 DONE:
6637         rte_free(mv_f);
6638
6639         return ret;
6640 }
6641
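/* Sizing note (illustrative): with M MAC filters and V VLANs on the VSI,
 * the flat mv_f array above holds one entry per (MAC, VLAN) pair, i.e.
 * M * V entries, or just M when no VLAN is configured. For example,
 * 3 MACs x 2 VLANs -> 6 macvlan entries handed to
 * i40e_remove_macvlan_filters().
 */
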
6642 int
6643 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6644 {
6645         struct i40e_macvlan_filter *mv_f;
6646         int mac_num;
6647         int ret = I40E_SUCCESS;
6648
6649         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
6650                 return I40E_ERR_PARAM;
6651
6652         /* If it's already set, just return */
6653         if (i40e_find_vlan_filter(vsi, vlan))
6654                 return I40E_SUCCESS;
6655
6656         mac_num = vsi->mac_num;
6657
6658         if (mac_num == 0) {
6659                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6660                 return I40E_ERR_PARAM;
6661         }
6662
6663         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6664
6665         if (mv_f == NULL) {
6666                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6667                 return I40E_ERR_NO_MEMORY;
6668         }
6669
6670         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6671
6672         if (ret != I40E_SUCCESS)
6673                 goto DONE;
6674
6675         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6676
6677         if (ret != I40E_SUCCESS)
6678                 goto DONE;
6679
6680         i40e_set_vlan_filter(vsi, vlan, 1);
6681
6682         vsi->vlan_num++;
6683         ret = I40E_SUCCESS;
6684 DONE:
6685         rte_free(mv_f);
6686         return ret;
6687 }
6688
6689 int
6690 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6691 {
6692         struct i40e_macvlan_filter *mv_f;
6693         int mac_num;
6694         int ret = I40E_SUCCESS;
6695
6696         /**
6697          * Vlan 0 is the generic filter for untagged packets
6698          * and can't be removed.
6699          */
6700         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
6701                 return I40E_ERR_PARAM;
6702
6703         /* If can't find it, just return */
6704         if (!i40e_find_vlan_filter(vsi, vlan))
6705                 return I40E_ERR_PARAM;
6706
6707         mac_num = vsi->mac_num;
6708
6709         if (mac_num == 0) {
6710                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6711                 return I40E_ERR_PARAM;
6712         }
6713
6714         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6715
6716         if (mv_f == NULL) {
6717                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6718                 return I40E_ERR_NO_MEMORY;
6719         }
6720
6721         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6722
6723         if (ret != I40E_SUCCESS)
6724                 goto DONE;
6725
6726         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6727
6728         if (ret != I40E_SUCCESS)
6729                 goto DONE;
6730
6731         /* This is last vlan to remove, replace all mac filter with vlan 0 */
6732         if (vsi->vlan_num == 1) {
6733                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6734                 if (ret != I40E_SUCCESS)
6735                         goto DONE;
6736
6737                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6738                 if (ret != I40E_SUCCESS)
6739                         goto DONE;
6740         }
6741
6742         i40e_set_vlan_filter(vsi, vlan, 0);
6743
6744         vsi->vlan_num--;
6745         ret = I40E_SUCCESS;
6746 DONE:
6747         rte_free(mv_f);
6748         return ret;
6749 }
6750
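/* Usage sketch (hypothetical): adding and deleting a VLAN keeps the HW
 * macvlan table in step with the SW mac_list/VFTA state:
 *
 *   ret = i40e_vsi_add_vlan(vsi, 100);     // every MAC gets a VLAN-100 entry
 *   ...
 *   ret = i40e_vsi_delete_vlan(vsi, 100);  // and loses it again
 *
 * Deleting the last VLAN re-adds every MAC with VLAN 0, so untagged
 * traffic keeps matching.
 */
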
6751 int
6752 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6753 {
6754         struct i40e_mac_filter *f;
6755         struct i40e_macvlan_filter *mv_f;
6756         int i, vlan_num = 0;
6757         int ret = I40E_SUCCESS;
6758
6759         /* If the MAC filter has already been configured, just return */
6760         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6761         if (f != NULL)
6762                 return I40E_SUCCESS;
6763         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6764                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6765
6766                 /**
6767                  * If vlan_num is 0, that's the first time to add mac,
6768                  * set mask for vlan_id 0.
6769                  */
6770                 if (vsi->vlan_num == 0) {
6771                         i40e_set_vlan_filter(vsi, 0, 1);
6772                         vsi->vlan_num = 1;
6773                 }
6774                 vlan_num = vsi->vlan_num;
6775         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6776                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6777                 vlan_num = 1;
6778
6779         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6780         if (mv_f == NULL) {
6781                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6782                 return I40E_ERR_NO_MEMORY;
6783         }
6784
6785         for (i = 0; i < vlan_num; i++) {
6786                 mv_f[i].filter_type = mac_filter->filter_type;
6787                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6788                                 ETH_ADDR_LEN);
6789         }
6790
6791         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6792                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6793                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6794                                         &mac_filter->mac_addr);
6795                 if (ret != I40E_SUCCESS)
6796                         goto DONE;
6797         }
6798
6799         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6800         if (ret != I40E_SUCCESS)
6801                 goto DONE;
6802
6803         /* Add the MAC addr to the mac list */
6804         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6805         if (f == NULL) {
6806                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6807                 ret = I40E_ERR_NO_MEMORY;
6808                 goto DONE;
6809         }
6810         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6811                         ETH_ADDR_LEN);
6812         f->mac_info.filter_type = mac_filter->filter_type;
6813         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
6814         vsi->mac_num++;
6815
6816         ret = I40E_SUCCESS;
6817 DONE:
6818         rte_free(mv_f);
6819
6820         return ret;
6821 }
6822
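/* Caller sketch (illustrative; `new_addr` is a hypothetical name):
 *
 *   struct i40e_mac_filter_info mf;
 *
 *   memset(&mf, 0, sizeof(mf));
 *   mf.filter_type = RTE_MACVLAN_PERFECT_MATCH;
 *   rte_memcpy(&mf.mac_addr, &new_addr, ETH_ADDR_LEN);
 *   ret = i40e_vsi_add_mac(vsi, &mf);
 *
 * For the MACVLAN filter types the address is replicated across every VLAN
 * currently set on the VSI; for the plain MAC types a single entry with
 * vlan_num == 1 is programmed.
 */
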
6823 int
6824 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
6825 {
6826         struct i40e_mac_filter *f;
6827         struct i40e_macvlan_filter *mv_f;
6828         int i, vlan_num;
6829         enum rte_mac_filter_type filter_type;
6830         int ret = I40E_SUCCESS;
6831
6832         /* Can't find it, return an error */
6833         f = i40e_find_mac_filter(vsi, addr);
6834         if (f == NULL)
6835                 return I40E_ERR_PARAM;
6836
6837         vlan_num = vsi->vlan_num;
6838         filter_type = f->mac_info.filter_type;
6839         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6840                 filter_type == RTE_MACVLAN_HASH_MATCH) {
6841                 if (vlan_num == 0) {
6842                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
6843                         return I40E_ERR_PARAM;
6844                 }
6845         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
6846                         filter_type == RTE_MAC_HASH_MATCH)
6847                 vlan_num = 1;
6848
6849         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6850         if (mv_f == NULL) {
6851                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6852                 return I40E_ERR_NO_MEMORY;
6853         }
6854
6855         for (i = 0; i < vlan_num; i++) {
6856                 mv_f[i].filter_type = filter_type;
6857                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6858                                 ETH_ADDR_LEN);
6859         }
6860         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6861                         filter_type == RTE_MACVLAN_HASH_MATCH) {
6862                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
6863                 if (ret != I40E_SUCCESS)
6864                         goto DONE;
6865         }
6866
6867         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
6868         if (ret != I40E_SUCCESS)
6869                 goto DONE;
6870
6871         /* Remove the MAC addr from the mac list */
6872         TAILQ_REMOVE(&vsi->mac_list, f, next);
6873         rte_free(f);
6874         vsi->mac_num--;
6875
6876         ret = I40E_SUCCESS;
6877 DONE:
6878         rte_free(mv_f);
6879         return ret;
6880 }
6881
6882 /* Configure hash enable flags for RSS */
6883 uint64_t
6884 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
6885 {
6886         uint64_t hena = 0;
6887         int i;
6888
6889         if (!flags)
6890                 return hena;
6891
6892         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
6893                 if (flags & (1ULL << i))
6894                         hena |= adapter->pctypes_tbl[i];
6895         }
6896
6897         return hena;
6898 }
6899
6900 /* Parse the hash enable flags */
6901 uint64_t
6902 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
6903 {
6904         uint64_t rss_hf = 0;
6905         int i;
6906
6907         if (!flags)
6908                 return rss_hf;
6909
6910         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
6911                 if (flags & adapter->pctypes_tbl[i])
6912                         rss_hf |= (1ULL << i);
6913         }
6914         return rss_hf;
6915 }
6916
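/* Worked example (illustrative): i40e_config_hena() and i40e_parse_hena()
 * are inverse mappings through adapter->pctypes_tbl. If
 * pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] carries the bit of the
 * IPv4/TCP PCTYPE, then
 *
 *   hena   = i40e_config_hena(adapter,
 *                             1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP);
 *   rss_hf = i40e_parse_hena(adapter, hena);
 *
 * round-trips back to the original flow-type bit.
 */
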
6917 /* Disable RSS */
6918 static void
6919 i40e_pf_disable_rss(struct i40e_pf *pf)
6920 {
6921         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6922
6923         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
6924         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
6925         I40E_WRITE_FLUSH(hw);
6926 }
6927
6928 int
6929 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
6930 {
6931         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6932         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6933         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
6934                            I40E_VFQF_HKEY_MAX_INDEX :
6935                            I40E_PFQF_HKEY_MAX_INDEX;
6936         int ret = 0;
6937
6938         if (!key || key_len == 0) {
6939                 PMD_DRV_LOG(DEBUG, "No key to be configured");
6940                 return 0;
6941         } else if (key_len != (key_idx + 1) * sizeof(uint32_t)) {
6943                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
6944                 return -EINVAL;
6945         }
6946
6947         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6948                 struct i40e_aqc_get_set_rss_key_data *key_dw =
6949                         (struct i40e_aqc_get_set_rss_key_data *)key;
6950
6951                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
6952                 if (ret)
6953                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
6954         } else {
6955                 uint32_t *hash_key = (uint32_t *)key;
6956                 uint16_t i;
6957
6958                 if (vsi->type == I40E_VSI_SRIOV) {
6959                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
6960                                 I40E_WRITE_REG(
6961                                         hw,
6962                                         I40E_VFQF_HKEY1(i, vsi->user_param),
6963                                         hash_key[i]);
6964
6965                 } else {
6966                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6967                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
6968                                                hash_key[i]);
6969                 }
6970                 I40E_WRITE_FLUSH(hw);
6971         }
6972
6973         return ret;
6974 }
6975
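/* Key-length sketch (illustrative): the accepted key size follows from the
 * register index range, e.g. for a PF VSI:
 *
 *   uint8_t rss_key[(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)];
 *                                        // 52 bytes with MAX_INDEX == 12
 *   ret = i40e_set_rss_key(vsi, rss_key, sizeof(rss_key));
 *
 * Any other non-zero length is rejected with -EINVAL above.
 */
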
6976 static int
6977 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
6978 {
6979         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6980         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6981         uint32_t reg;
6982         int ret;
6983
6984         if (!key || !key_len)
6985                 return -EINVAL;
6986
6987         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6988                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
6989                         (struct i40e_aqc_get_set_rss_key_data *)key);
6990                 if (ret) {
6991                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
6992                         return ret;
6993                 }
6994         } else {
6995                 uint32_t *key_dw = (uint32_t *)key;
6996                 uint16_t i;
6997
6998                 if (vsi->type == I40E_VSI_SRIOV) {
6999                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7000                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7001                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7002                         }
7003                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7004                                    sizeof(uint32_t);
7005                 } else {
7006                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7007                                 reg = I40E_PFQF_HKEY(i);
7008                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7009                         }
7010                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7011                                    sizeof(uint32_t);
7012                 }
7013         }
7014         return 0;
7015 }
7016
7017 static int
7018 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7019 {
7020         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7021         uint64_t hena;
7022         int ret;
7023
7024         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7025                                rss_conf->rss_key_len);
7026         if (ret)
7027                 return ret;
7028
7029         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7030         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7031         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7032         I40E_WRITE_FLUSH(hw);
7033
7034         return 0;
7035 }
7036
7037 static int
7038 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7039                          struct rte_eth_rss_conf *rss_conf)
7040 {
7041         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7042         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7043         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7044         uint64_t hena;
7045
7046         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7047         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7048
7049         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7050                 if (rss_hf != 0) /* Enable RSS */
7051                         return -EINVAL;
7052                 return 0; /* Nothing to do */
7053         }
7054         /* RSS enabled */
7055         if (rss_hf == 0) /* Disable RSS */
7056                 return -EINVAL;
7057
7058         return i40e_hw_rss_hash_set(pf, rss_conf);
7059 }
7060
7061 static int
7062 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7063                            struct rte_eth_rss_conf *rss_conf)
7064 {
7065         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7066         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7067         uint64_t hena;
7068
7069         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7070                          &rss_conf->rss_key_len);
7071
7072         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7073         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7074         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7075
7076         return 0;
7077 }
7078
7079 static int
7080 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7081 {
7082         switch (filter_type) {
7083         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7084                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7085                 break;
7086         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7087                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7088                 break;
7089         case RTE_TUNNEL_FILTER_IMAC_TENID:
7090                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7091                 break;
7092         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7093                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7094                 break;
7095         case ETH_TUNNEL_FILTER_IMAC:
7096                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7097                 break;
7098         case ETH_TUNNEL_FILTER_OIP:
7099                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7100                 break;
7101         case ETH_TUNNEL_FILTER_IIP:
7102                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7103                 break;
7104         default:
7105                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7106                 return -EINVAL;
7107         }
7108
7109         return 0;
7110 }
7111
7112 /* Convert an AQ cloud filter element to the internal tunnel filter */
7113 static int
7114 i40e_tunnel_filter_convert(
7115         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
7116         struct i40e_tunnel_filter *tunnel_filter)
7117 {
7118         ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
7119                         (struct ether_addr *)&tunnel_filter->input.outer_mac);
7120         ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
7121                         (struct ether_addr *)&tunnel_filter->input.inner_mac);
7122         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7123         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7124              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7125             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7126                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7127         else
7128                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7129         tunnel_filter->input.flags = cld_filter->element.flags;
7130         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7131         tunnel_filter->queue = cld_filter->element.queue_number;
7132         rte_memcpy(tunnel_filter->input.general_fields,
7133                    cld_filter->general_fields,
7134                    sizeof(cld_filter->general_fields));
7135
7136         return 0;
7137 }
7138
7139 /* Check if the tunnel filter already exists */
7140 struct i40e_tunnel_filter *
7141 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7142                              const struct i40e_tunnel_filter_input *input)
7143 {
7144         int ret;
7145
7146         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7147         if (ret < 0)
7148                 return NULL;
7149
7150         return tunnel_rule->hash_map[ret];
7151 }
7152
7153 /* Add a tunnel filter into the SW list */
7154 static int
7155 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7156                              struct i40e_tunnel_filter *tunnel_filter)
7157 {
7158         struct i40e_tunnel_rule *rule = &pf->tunnel;
7159         int ret;
7160
7161         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7162         if (ret < 0) {
7163                 PMD_DRV_LOG(ERR,
7164                             "Failed to insert tunnel filter to hash table %d!",
7165                             ret);
7166                 return ret;
7167         }
7168         rule->hash_map[ret] = tunnel_filter;
7169
7170         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7171
7172         return 0;
7173 }
7174
7175 /* Delete a tunnel filter from the SW list */
7176 int
7177 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7178                           struct i40e_tunnel_filter_input *input)
7179 {
7180         struct i40e_tunnel_rule *rule = &pf->tunnel;
7181         struct i40e_tunnel_filter *tunnel_filter;
7182         int ret;
7183
7184         ret = rte_hash_del_key(rule->hash_table, input);
7185         if (ret < 0) {
7186                 PMD_DRV_LOG(ERR,
7187                             "Failed to delete tunnel filter to hash table %d!",
7188                             ret);
7189                 return ret;
7190         }
7191         tunnel_filter = rule->hash_map[ret];
7192         rule->hash_map[ret] = NULL;
7193
7194         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7195         rte_free(tunnel_filter);
7196
7197         return 0;
7198 }
7199
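/* Pattern note (illustrative): the tunnel rules follow the common rte_hash
 * idiom where the returned key slot doubles as an index into a parallel
 * hash_map[] array:
 *
 *   pos = rte_hash_add_key(rule->hash_table, &filter->input);
 *   rule->hash_map[pos] = filter;   // later looked up in O(1) by input
 *
 * Lookup, insert and delete therefore all stay O(1), while the TAILQ keeps
 * insertion order for iteration.
 */
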
7200 int
7201 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7202                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
7203                         uint8_t add)
7204 {
7205         uint16_t ip_type;
7206         uint32_t ipv4_addr, ipv4_addr_le;
7207         uint8_t i, tun_type = 0;
7208         /* internal variable to convert ipv6 byte order */
7209         uint32_t convert_ipv6[4];
7210         int val, ret = 0;
7211         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7212         struct i40e_vsi *vsi = pf->main_vsi;
7213         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7214         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7215         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7216         struct i40e_tunnel_filter *tunnel, *node;
7217         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7218
7219         cld_filter = rte_zmalloc("tunnel_filter",
7220                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7221                          0);
7222
7223         if (cld_filter == NULL) {
7224                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7225                 return -ENOMEM;
7226         }
7227         pfilter = cld_filter;
7228
7229         ether_addr_copy(&tunnel_filter->outer_mac,
7230                         (struct ether_addr *)&pfilter->element.outer_mac);
7231         ether_addr_copy(&tunnel_filter->inner_mac,
7232                         (struct ether_addr *)&pfilter->element.inner_mac);
7233
7234         pfilter->element.inner_vlan =
7235                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7236         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
7237                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7238                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7239                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7240                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7241                                 &ipv4_addr_le,
7242                                 sizeof(pfilter->element.ipaddr.v4.data));
7243         } else {
7244                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7245                 for (i = 0; i < 4; i++) {
7246                         convert_ipv6[i] =
7247                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
7248                 }
7249                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7250                            &convert_ipv6,
7251                            sizeof(pfilter->element.ipaddr.v6.data));
7252         }
7253
7254         /* check the tunnel type */
7255         switch (tunnel_filter->tunnel_type) {
7256         case RTE_TUNNEL_TYPE_VXLAN:
7257                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7258                 break;
7259         case RTE_TUNNEL_TYPE_NVGRE:
7260                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7261                 break;
7262         case RTE_TUNNEL_TYPE_IP_IN_GRE:
7263                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7264                 break;
7265         default:
7266                 /* Other tunnel types are not supported. */
7267                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7268                 rte_free(cld_filter);
7269                 return -EINVAL;
7270         }
7271
7272         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7273                                        &pfilter->element.flags);
7274         if (val < 0) {
7275                 rte_free(cld_filter);
7276                 return -EINVAL;
7277         }
7278
7279         pfilter->element.flags |= rte_cpu_to_le_16(
7280                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7281                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7282         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7283         pfilter->element.queue_number =
7284                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7285
7286         /* Check if the filter exists in the SW list */
7287         memset(&check_filter, 0, sizeof(check_filter));
7288         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7289         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7290         if (add && node) {
7291                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7292                 rte_free(cld_filter);
7293                 return -EINVAL;
7294         }
7295
7296         if (!add && !node) {
7297                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7298                 rte_free(cld_filter);
7299                 return -EINVAL;
7300         }
7301
7302         if (add) {
7303                 ret = i40e_aq_add_cloud_filters(hw,
7304                                         vsi->seid, &cld_filter->element, 1);
7305                 if (ret < 0) {
7306                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7307                         rte_free(cld_filter);
7308                         return -ENOTSUP;
7309                 }
7310                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7311                 if (tunnel == NULL) {
7312                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7313                         rte_free(cld_filter);
7314                         return -ENOMEM;
7315                 }
7316
7317                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7318                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7319                 if (ret < 0)
7320                         rte_free(tunnel);
7321         } else {
7322                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7323                                                    &cld_filter->element, 1);
7324                 if (ret < 0) {
7325                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7326                         rte_free(cld_filter);
7327                         return -ENOTSUP;
7328                 }
7329                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7330         }
7331
7332         rte_free(cld_filter);
7333         return ret;
7334 }
7335
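/* Flow sketch (illustrative): i40e_dev_tunnel_filter_set() keeps the admin
 * queue and the SW list in lock-step:
 *
 *   add:    i40e_aq_add_cloud_filters()    then i40e_sw_tunnel_filter_insert()
 *   remove: i40e_aq_remove_cloud_filters() then i40e_sw_tunnel_filter_del()
 *
 * which is why i40e_sw_tunnel_filter_lookup() can reject duplicate adds and
 * removals of unknown filters before any firmware command is issued.
 */
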
7336 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7337 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7338 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7339 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7340 #define I40E_TR_GRE_KEY_MASK                    0x400
7341 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7342 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7343
7344 static enum i40e_status_code
7345 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7346 {
7347         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7348         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7349         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7350         enum i40e_status_code status = I40E_SUCCESS;
7351
7352         if (pf->support_multi_driver) {
7353                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7354                 return I40E_NOT_SUPPORTED;
7355         }
7356
7357         memset(&filter_replace, 0,
7358                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7359         memset(&filter_replace_buf, 0,
7360                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7361
7362         /* create L1 filter */
7363         filter_replace.old_filter_type =
7364                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7365         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7366         filter_replace.tr_bit = 0;
7367
7368         /* Prepare the buffer, 3 entries */
7369         filter_replace_buf.data[0] =
7370                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7371         filter_replace_buf.data[0] |=
7372                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7373         filter_replace_buf.data[2] = 0xFF;
7374         filter_replace_buf.data[3] = 0xFF;
7375         filter_replace_buf.data[4] =
7376                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7377         filter_replace_buf.data[4] |=
7378                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7379         filter_replace_buf.data[7] = 0xF0;
7380         filter_replace_buf.data[8] =
7381                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7382         filter_replace_buf.data[8] |=
7383                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7384         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7385                 I40E_TR_GENEVE_KEY_MASK |
7386                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7387         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7388                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7389                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7390
7391         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7392                                                &filter_replace_buf);
7393         if (!status) {
7394                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7395                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7396                             "cloud l1 type is changed from 0x%x to 0x%x",
7397                             filter_replace.old_filter_type,
7398                             filter_replace.new_filter_type);
7399         }
7400         return status;
7401 }
7402
7403 static enum i40e_status_code
7404 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7405 {
7406         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7407         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7408         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7409         enum i40e_status_code status = I40E_SUCCESS;
7410
7411         if (pf->support_multi_driver) {
7412                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7413                 return I40E_NOT_SUPPORTED;
7414         }
7415
7416         /* For MPLSoUDP */
7417         memset(&filter_replace, 0,
7418                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7419         memset(&filter_replace_buf, 0,
7420                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7421         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7422                 I40E_AQC_MIRROR_CLOUD_FILTER;
7423         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7424         filter_replace.new_filter_type =
7425                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7426         /* Prepare the buffer, 2 entries */
7427         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7428         filter_replace_buf.data[0] |=
7429                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7430         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7431         filter_replace_buf.data[4] |=
7432                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7433         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7434                                                &filter_replace_buf);
7435         if (status < 0)
7436                 return status;
7437         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7438                     "cloud filter type is changed from 0x%x to 0x%x",
7439                     filter_replace.old_filter_type,
7440                     filter_replace.new_filter_type);
7441
7442         /* For MPLSoGRE */
7443         memset(&filter_replace, 0,
7444                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7445         memset(&filter_replace_buf, 0,
7446                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7447
7448         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7449                 I40E_AQC_MIRROR_CLOUD_FILTER;
7450         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7451         filter_replace.new_filter_type =
7452                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7453         /* Prepare the buffer, 2 entries */
7454         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7455         filter_replace_buf.data[0] |=
7456                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7457         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7458         filter_replace_buf.data[4] |=
7459                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7460
7461         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7462                                                &filter_replace_buf);
7463         if (!status) {
7464                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7465                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7466                             "cloud filter type is changed from 0x%x to 0x%x",
7467                             filter_replace.old_filter_type,
7468                             filter_replace.new_filter_type);
7469         }
7470         return status;
7471 }
7472
7473 static enum i40e_status_code
7474 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7475 {
7476         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7477         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7478         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7479         enum i40e_status_code status = I40E_SUCCESS;
7480
7481         if (pf->support_multi_driver) {
7482                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7483                 return I40E_NOT_SUPPORTED;
7484         }
7485
7486         /* For GTP-C */
7487         memset(&filter_replace, 0,
7488                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7489         memset(&filter_replace_buf, 0,
7490                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7491         /* create L1 filter */
7492         filter_replace.old_filter_type =
7493                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7494         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
7495         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
7496                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7497         /* Prepare the buffer, 2 entries */
7498         filter_replace_buf.data[0] =
7499                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7500         filter_replace_buf.data[0] |=
7501                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7502         filter_replace_buf.data[2] = 0xFF;
7503         filter_replace_buf.data[3] = 0xFF;
7504         filter_replace_buf.data[4] =
7505                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7506         filter_replace_buf.data[4] |=
7507                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7508         filter_replace_buf.data[6] = 0xFF;
7509         filter_replace_buf.data[7] = 0xFF;
7510         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7511                                                &filter_replace_buf);
7512         if (status < 0)
7513                 return status;
7514         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7515                     "cloud l1 type is changed from 0x%x to 0x%x",
7516                     filter_replace.old_filter_type,
7517                     filter_replace.new_filter_type);
7518
7519         /* For GTP-U */
7520         memset(&filter_replace, 0,
7521                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7522         memset(&filter_replace_buf, 0,
7523                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7524         /* create L1 filter */
7525         filter_replace.old_filter_type =
7526                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
7527         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
7528         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
7529                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7530         /* Prepare the buffer, 2 entries */
7531         filter_replace_buf.data[0] =
7532                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7533         filter_replace_buf.data[0] |=
7534                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7535         filter_replace_buf.data[2] = 0xFF;
7536         filter_replace_buf.data[3] = 0xFF;
7537         filter_replace_buf.data[4] =
7538                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7539         filter_replace_buf.data[4] |=
7540                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7541         filter_replace_buf.data[6] = 0xFF;
7542         filter_replace_buf.data[7] = 0xFF;
7543
7544         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7545                                                &filter_replace_buf);
7546         if (!status) {
7547                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7548                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7549                             "cloud l1 type is changed from 0x%x to 0x%x",
7550                             filter_replace.old_filter_type,
7551                             filter_replace.new_filter_type);
7552         }
7553         return status;
7554 }
7555
7556 static enum i40e_status_code
7557 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
7558 {
7559         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7560         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7561         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7562         enum i40e_status_code status = I40E_SUCCESS;
7563
7564         if (pf->support_multi_driver) {
7565                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7566                 return I40E_NOT_SUPPORTED;
7567         }
7568
7569         /* For GTP-C */
7570         memset(&filter_replace, 0,
7571                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7572         memset(&filter_replace_buf, 0,
7573                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7574         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7575         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7576         filter_replace.new_filter_type =
7577                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7578         /* Prepare the buffer, 2 entries */
7579         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
7580         filter_replace_buf.data[0] |=
7581                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7582         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7583         filter_replace_buf.data[4] |=
7584                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7585         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7586                                                &filter_replace_buf);
7587         if (status < 0)
7588                 return status;
7589         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7590                     "cloud filter type is changed from 0x%x to 0x%x",
7591                     filter_replace.old_filter_type,
7592                     filter_replace.new_filter_type);
7593
7594         /* For GTP-U */
7595         memset(&filter_replace, 0,
7596                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7597         memset(&filter_replace_buf, 0,
7598                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7599         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7600         filter_replace.old_filter_type =
7601                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7602         filter_replace.new_filter_type =
7603                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7604         /* Prepare the buffer, 2 entries */
7605         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
7606         filter_replace_buf.data[0] |=
7607                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7608         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7609         filter_replace_buf.data[4] |=
7610                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7611
7612         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7613                                                &filter_replace_buf);
7614         if (!status) {
7615                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7616                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7617                             "cloud filter type is changed from 0x%x to 0x%x",
7618                             filter_replace.old_filter_type,
7619                             filter_replace.new_filter_type);
7620         }
7621         return status;
7622 }
7623
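/* Mapping note (illustrative summary of the replace commands above):
 *
 *   GTP-C -> L1 filter 0x12 feeding cloud filter 0x11
 *   GTP-U -> L1 filter 0x13 feeding cloud filter 0x12
 *
 * This is why the switch in i40e_dev_consistent_tunnel_filter_set() below
 * selects I40E_AQC_ADD_CLOUD_FILTER_0X11/0X12 for the GTPC/GTPU types.
 */
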
7624 int
7625 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
7626                       struct i40e_tunnel_filter_conf *tunnel_filter,
7627                       uint8_t add)
7628 {
7629         uint16_t ip_type;
7630         uint32_t ipv4_addr, ipv4_addr_le;
7631         uint8_t i, tun_type = 0;
7632         /* internal variable to convert ipv6 byte order */
7633         uint32_t convert_ipv6[4];
7634         int val, ret = 0;
7635         struct i40e_pf_vf *vf = NULL;
7636         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7637         struct i40e_vsi *vsi;
7638         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7639         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7640         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7641         struct i40e_tunnel_filter *tunnel, *node;
7642         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7643         uint32_t teid_le;
7644         bool big_buffer = 0;
7645
7646         cld_filter = rte_zmalloc("tunnel_filter",
7647                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7648                          0);
7649
7650         if (cld_filter == NULL) {
7651                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7652                 return -ENOMEM;
7653         }
7654         pfilter = cld_filter;
7655
7656         ether_addr_copy(&tunnel_filter->outer_mac,
7657                         (struct ether_addr *)&pfilter->element.outer_mac);
7658         ether_addr_copy(&tunnel_filter->inner_mac,
7659                         (struct ether_addr *)&pfilter->element.inner_mac);
7660
7661         pfilter->element.inner_vlan =
7662                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7663         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
7664                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7665                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7666                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7667                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7668                                 &ipv4_addr_le,
7669                                 sizeof(pfilter->element.ipaddr.v4.data));
7670         } else {
7671                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7672                 for (i = 0; i < 4; i++) {
7673                         convert_ipv6[i] =
7674                         rte_cpu_to_le_32(rte_be_to_cpu_32(
7675                                          tunnel_filter->ip_addr.ipv6_addr[i]));
7676                 }
7677                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7678                            &convert_ipv6,
7679                            sizeof(pfilter->element.ipaddr.v6.data));
7680         }
7681
7682         /* check the tunnel type */
7683         switch (tunnel_filter->tunnel_type) {
7684         case I40E_TUNNEL_TYPE_VXLAN:
7685                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7686                 break;
7687         case I40E_TUNNEL_TYPE_NVGRE:
7688                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7689                 break;
7690         case I40E_TUNNEL_TYPE_IP_IN_GRE:
7691                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7692                 break;
7693         case I40E_TUNNEL_TYPE_MPLSoUDP:
7694                 if (!pf->mpls_replace_flag) {
7695                         i40e_replace_mpls_l1_filter(pf);
7696                         i40e_replace_mpls_cloud_filter(pf);
7697                         pf->mpls_replace_flag = 1;
7698                 }
7699                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7700                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7701                         teid_le >> 4;
7702                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7703                         (teid_le & 0xF) << 12;
7704                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7705                         0x40;
7706                 big_buffer = 1;
7707                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
7708                 break;
7709         case I40E_TUNNEL_TYPE_MPLSoGRE:
7710                 if (!pf->mpls_replace_flag) {
7711                         i40e_replace_mpls_l1_filter(pf);
7712                         i40e_replace_mpls_cloud_filter(pf);
7713                         pf->mpls_replace_flag = 1;
7714                 }
7715                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7716                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7717                         teid_le >> 4;
7718                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7719                         (teid_le & 0xF) << 12;
7720                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7721                         0x0;
7722                 big_buffer = 1;
7723                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
7724                 break;
7725         case I40E_TUNNEL_TYPE_GTPC:
7726                 if (!pf->gtp_replace_flag) {
7727                         i40e_replace_gtp_l1_filter(pf);
7728                         i40e_replace_gtp_cloud_filter(pf);
7729                         pf->gtp_replace_flag = 1;
7730                 }
7731                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7732                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
7733                         (teid_le >> 16) & 0xFFFF;
7734                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
7735                         teid_le & 0xFFFF;
7736                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
7737                         0x0;
7738                 big_buffer = 1;
7739                 break;
7740         case I40E_TUNNEL_TYPE_GTPU:
7741                 if (!pf->gtp_replace_flag) {
7742                         i40e_replace_gtp_l1_filter(pf);
7743                         i40e_replace_gtp_cloud_filter(pf);
7744                         pf->gtp_replace_flag = 1;
7745                 }
7746                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7747                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
7748                         (teid_le >> 16) & 0xFFFF;
7749                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
7750                         teid_le & 0xFFFF;
7751                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
7752                         0x0;
7753                 big_buffer = 1;
7754                 break;
7755         case I40E_TUNNEL_TYPE_QINQ:
7756                 if (!pf->qinq_replace_flag) {
7757                         ret = i40e_cloud_filter_qinq_create(pf);
7758                         if (ret < 0)
7759                                 PMD_DRV_LOG(DEBUG,
7760                                             "QinQ tunnel filter already created.");
7761                         pf->qinq_replace_flag = 1;
7762                 }
7763                 /* Add the outer and inner VLAN values to the
7764                  * general fields. The big buffer flag must be
7765                  * set; see the changes in
7766                  * i40e_aq_add_cloud_filters.
7767                  */
7768                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
7769                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
7770                 big_buffer = 1;
7771                 break;
7772         default:
7773                 /* Other tunnel types are not supported. */
7774                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7775                 rte_free(cld_filter);
7776                 return -EINVAL;
7777         }
7778
7779         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
7780                 pfilter->element.flags =
7781                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7782         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
7783                 pfilter->element.flags =
7784                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7785         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
7786                 pfilter->element.flags =
7787                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7788         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
7789                 pfilter->element.flags =
7790                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7791         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
7792                 pfilter->element.flags |=
7793                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
7794         else {
7795                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7796                                                 &pfilter->element.flags);
7797                 if (val < 0) {
7798                         rte_free(cld_filter);
7799                         return -EINVAL;
7800                 }
7801         }
7802
7803         pfilter->element.flags |= rte_cpu_to_le_16(
7804                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7805                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7806         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7807         pfilter->element.queue_number =
7808                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7809
7810         if (!tunnel_filter->is_to_vf)
7811                 vsi = pf->main_vsi;
7812         else {
7813                 if (tunnel_filter->vf_id >= pf->vf_num) {
7814                         PMD_DRV_LOG(ERR, "Invalid argument.");
7815                         rte_free(cld_filter);
7816                         return -EINVAL;
7817                 }
7818                 vf = &pf->vfs[tunnel_filter->vf_id];
7819                 vsi = vf->vsi;
7820         }
7821
7822         /* Check if the filter exists in the SW list */
7823         memset(&check_filter, 0, sizeof(check_filter));
7824         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7825         check_filter.is_to_vf = tunnel_filter->is_to_vf;
7826         check_filter.vf_id = tunnel_filter->vf_id;
7827         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7828         if (add && node) {
7829                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7830                 rte_free(cld_filter);
7831                 return -EINVAL;
7832         }
7833
7834         if (!add && !node) {
7835                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7836                 rte_free(cld_filter);
7837                 return -EINVAL;
7838         }
7839
7840         if (add) {
7841                 if (big_buffer)
7842                         ret = i40e_aq_add_cloud_filters_big_buffer(hw,
7843                                                    vsi->seid, cld_filter, 1);
7844                 else
7845                         ret = i40e_aq_add_cloud_filters(hw,
7846                                         vsi->seid, &cld_filter->element, 1);
7847                 if (ret < 0) {
7848                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7849                         rte_free(cld_filter);
7850                         return -ENOTSUP;
7851                 }
7852                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7853                 if (tunnel == NULL) {
7854                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7855                         rte_free(cld_filter);
7856                         return -ENOMEM;
7857                 }
7858
7859                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7860                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7861                 if (ret < 0)
7862                         rte_free(tunnel);
7863         } else {
7864                 if (big_buffer)
7865                         ret = i40e_aq_remove_cloud_filters_big_buffer(
7866                                 hw, vsi->seid, cld_filter, 1);
7867                 else
7868                         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7869                                                    &cld_filter->element, 1);
7870                 if (ret < 0) {
7871                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7872                         rte_free(cld_filter);
7873                         return -ENOTSUP;
7874                 }
7875                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7876         }
7877
7878         rte_free(cld_filter);
7879         return ret;
7880 }
7881
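/*
 * Look up @port in the PF's VXLAN UDP port table. Returns the table index
 * if the port is found, or -1 otherwise. Unused slots hold 0, so calling
 * this with port == 0 locates the first free slot.
 */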
7882 static int
7883 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
7884 {
7885         uint8_t i;
7886
7887         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7888                 if (pf->vxlan_ports[i] == port)
7889                         return i;
7890         }
7891
7892         return -1;
7893 }
7894
7895 static int
7896 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
7897 {
7898         int  idx, ret;
7899         uint8_t filter_idx;
7900         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7901
7902         idx = i40e_get_vxlan_port_idx(pf, port);
7903
7904         /* Check if port already exists */
7905         if (idx >= 0) {
7906                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
7907                 return -EINVAL;
7908         }
7909
7910         /* Now check if there is space to add the new port */
7911         idx = i40e_get_vxlan_port_idx(pf, 0);
7912         if (idx < 0) {
7913                 PMD_DRV_LOG(ERR,
7914                         "Maximum number of UDP ports reached, not adding port %d",
7915                         port);
7916                 return -ENOSPC;
7917         }
7918
7919         ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
7920                                         &filter_idx, NULL);
7921         if (ret < 0) {
7922                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
7923                 return -1;
7924         }
7925
7926         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
7927                          port, filter_idx);
7928
7929         /* New port: add it and mark its index in the bitmap */
7930         pf->vxlan_ports[idx] = port;
7931         pf->vxlan_bitmap |= (1 << idx);
7932
7933         if (!(pf->flags & I40E_FLAG_VXLAN))
7934                 pf->flags |= I40E_FLAG_VXLAN;
7935
7936         return 0;
7937 }
7938
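/*
 * Remove @port from the PF's VXLAN UDP port table and delete the matching
 * hardware UDP tunnel entry. The VXLAN flag is cleared once the bitmap of
 * offloaded ports becomes empty.
 */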
7939 static int
7940 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
7941 {
7942         int idx;
7943         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7944
7945         if (!(pf->flags & I40E_FLAG_VXLAN)) {
7946                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
7947                 return -EINVAL;
7948         }
7949
7950         idx = i40e_get_vxlan_port_idx(pf, port);
7951
7952         if (idx < 0) {
7953                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
7954                 return -EINVAL;
7955         }
7956
7957         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
7958                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
7959                 return -1;
7960         }
7961
7962         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
7963                         port, idx);
7964
7965         pf->vxlan_ports[idx] = 0;
7966         pf->vxlan_bitmap &= ~(1 << idx);
7967
7968         if (!pf->vxlan_bitmap)
7969                 pf->flags &= ~I40E_FLAG_VXLAN;
7970
7971         return 0;
7972 }
7973
7974 /* Add UDP tunneling port */
7975 static int
7976 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
7977                              struct rte_eth_udp_tunnel *udp_tunnel)
7978 {
7979         int ret = 0;
7980         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7981
7982         if (udp_tunnel == NULL)
7983                 return -EINVAL;
7984
7985         switch (udp_tunnel->prot_type) {
7986         case RTE_TUNNEL_TYPE_VXLAN:
7987                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
7988                 break;
7989
7990         case RTE_TUNNEL_TYPE_GENEVE:
7991         case RTE_TUNNEL_TYPE_TEREDO:
7992                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
7993                 ret = -1;
7994                 break;
7995
7996         default:
7997                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7998                 ret = -1;
7999                 break;
8000         }
8001
8002         return ret;
8003 }
8004
8005 /* Remove UDP tunneling port */
8006 static int
8007 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8008                              struct rte_eth_udp_tunnel *udp_tunnel)
8009 {
8010         int ret = 0;
8011         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8012
8013         if (udp_tunnel == NULL)
8014                 return -EINVAL;
8015
8016         switch (udp_tunnel->prot_type) {
8017         case RTE_TUNNEL_TYPE_VXLAN:
8018                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8019                 break;
8020         case RTE_TUNNEL_TYPE_GENEVE:
8021         case RTE_TUNNEL_TYPE_TEREDO:
8022                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8023                 ret = -1;
8024                 break;
8025         default:
8026                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8027                 ret = -1;
8028                 break;
8029         }
8030
8031         return ret;
8032 }
8033
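/*
 * A minimal sketch of how these callbacks are reached from an application
 * through the generic ethdev API (port_id 0 is assumed here purely for
 * illustration):
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,	(the IANA-assigned VXLAN port)
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *	rte_eth_dev_udp_tunnel_port_add(0, &tunnel);
 *	...
 *	rte_eth_dev_udp_tunnel_port_del(0, &tunnel);
 */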
8034 /* Calculate the maximum number of contiguous PF queues that are configured */
8035 static int
8036 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8037 {
8038         struct rte_eth_dev_data *data = pf->dev_data;
8039         int i, num;
8040         struct i40e_rx_queue *rxq;
8041
8042         num = 0;
8043         for (i = 0; i < pf->lan_nb_qps; i++) {
8044                 rxq = data->rx_queues[i];
8045                 if (rxq && rxq->q_set)
8046                         num++;
8047                 else
8048                         break;
8049         }
8050
8051         return num;
8052 }
8053
8054 /* Configure RSS */
8055 static int
8056 i40e_pf_config_rss(struct i40e_pf *pf)
8057 {
8058         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8059         struct rte_eth_rss_conf rss_conf;
8060         uint32_t i, lut = 0;
8061         uint16_t j, num;
8062
8063         /*
8064          * If both VMDQ and RSS are enabled, not all PF queues are configured.
8065          * It's necessary to calculate the actual number of configured PF queues.
8066          */
8067         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8068                 num = i40e_pf_calc_configured_queues_num(pf);
8069         else
8070                 num = pf->dev_data->nb_rx_queues;
8071
8072         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8073         PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
8074                         num);
8075
8076         if (num == 0) {
8077                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
8078                 return -ENOTSUP;
8079         }
8080
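        /*
         * Fill the RSS lookup table. Entries are packed four per 32-bit
         * HLUT register, with each new entry shifted into the low byte.
         * For example, with num == 4 the first register is written as
         * 0x00010203, spreading queues 0..3 over the first four LUT slots.
         */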
8081         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
8082                 if (j == num)
8083                         j = 0;
8084                 lut = (lut << 8) | (j & ((0x1 <<
8085                         hw->func_caps.rss_table_entry_width) - 1));
8086                 if ((i & 3) == 3)
8087                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
8088         }
8089
8090         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
8091         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
8092                 i40e_pf_disable_rss(pf);
8093                 return 0;
8094         }
8095         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
8096                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
8097                 /* Random default keys */
8098                 static uint32_t rss_key_default[] = {0x6b793944,
8099                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8100                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8101                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8102
8103                 rss_conf.rss_key = (uint8_t *)rss_key_default;
8104                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8105                                                         sizeof(uint32_t);
8106         }
8107
8108         return i40e_hw_rss_hash_set(pf, &rss_conf);
8109 }
8110
8111 static int
8112 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
8113                                struct rte_eth_tunnel_filter_conf *filter)
8114 {
8115         if (pf == NULL || filter == NULL) {
8116                 PMD_DRV_LOG(ERR, "Invalid parameter");
8117                 return -EINVAL;
8118         }
8119
8120         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
8121                 PMD_DRV_LOG(ERR, "Invalid queue ID");
8122                 return -EINVAL;
8123         }
8124
8125         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
8126                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
8127                 return -EINVAL;
8128         }
8129
8130         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
8131                 (is_zero_ether_addr(&filter->outer_mac))) {
8132                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
8133                 return -EINVAL;
8134         }
8135
8136         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
8137                 (is_zero_ether_addr(&filter->inner_mac))) {
8138                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
8139                 return -EINVAL;
8140         }
8141
8142         return 0;
8143 }
8144
8145 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8146 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
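/*
 * Select the GRE key length matched by the parser: len == 3 sets the
 * mask-enable bit in GL_PRS_FVBM(2) (3-byte key) and len == 4 clears it
 * (4-byte key). As this is a global register, the change is refused when
 * the NIC is shared with another driver.
 */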
8147 static int
8148 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8149 {
8150         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8151         uint32_t val, reg;
8152         int ret = -EINVAL;
8153
8154         if (pf->support_multi_driver) {
8155                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8156                 return -ENOTSUP;
8157         }
8158
8159         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8160         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8161
8162         if (len == 3) {
8163                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8164         } else if (len == 4) {
8165                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8166         } else {
8167                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8168                 return ret;
8169         }
8170
8171         if (reg != val) {
8172                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
8173                                                    reg, NULL);
8174                 if (ret != 0)
8175                         return ret;
8176                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
8177                             "with value 0x%08x",
8178                             I40E_GL_PRS_FVBM(2), reg);
8179                 i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
8180         } else {
8181                 ret = 0;
8182         }
8183         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8184                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8185
8186         return ret;
8187 }
8188
8189 static int
8190 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
8191 {
8192         int ret = -EINVAL;
8193
8194         if (!hw || !cfg)
8195                 return -EINVAL;
8196
8197         switch (cfg->cfg_type) {
8198         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
8199                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
8200                 break;
8201         default:
8202                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
8203                 break;
8204         }
8205
8206         return ret;
8207 }
8208
8209 static int
8210 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
8211                                enum rte_filter_op filter_op,
8212                                void *arg)
8213 {
8214         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8215         int ret = I40E_ERR_PARAM;
8216
8217         switch (filter_op) {
8218         case RTE_ETH_FILTER_SET:
8219                 ret = i40e_dev_global_config_set(hw,
8220                         (struct rte_eth_global_cfg *)arg);
8221                 break;
8222         default:
8223                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8224                 break;
8225         }
8226
8227         return ret;
8228 }
8229
8230 static int
8231 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
8232                           enum rte_filter_op filter_op,
8233                           void *arg)
8234 {
8235         struct rte_eth_tunnel_filter_conf *filter;
8236         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8237         int ret = I40E_SUCCESS;
8238
8239         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
8240
8241         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
8242                 return I40E_ERR_PARAM;
8243
8244         switch (filter_op) {
8245         case RTE_ETH_FILTER_NOP:
8246                 if (!(pf->flags & I40E_FLAG_VXLAN))
8247                         ret = I40E_NOT_SUPPORTED;
8248                 break;
8249         case RTE_ETH_FILTER_ADD:
8250                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
8251                 break;
8252         case RTE_ETH_FILTER_DELETE:
8253                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
8254                 break;
8255         default:
8256                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8257                 ret = I40E_ERR_PARAM;
8258                 break;
8259         }
8260
8261         return ret;
8262 }
8263
8264 static int
8265 i40e_pf_config_mq_rx(struct i40e_pf *pf)
8266 {
8267         int ret = 0;
8268         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8269
8270         /* RSS setup */
8271         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
8272                 ret = i40e_pf_config_rss(pf);
8273         else
8274                 i40e_pf_disable_rss(pf);
8275
8276         return ret;
8277 }
8278
8279 /* Get the symmetric hash enable configurations per port */
8280 static void
8281 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
8282 {
8283         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8284
8285         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
8286 }
8287
8288 /* Set the symmetric hash enable configurations per port */
8289 static void
8290 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8291 {
8292         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8293
8294         if (enable > 0) {
8295                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
8296                         PMD_DRV_LOG(INFO,
8297                                 "Symmetric hash has already been enabled");
8298                         return;
8299                 }
8300                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8301         } else {
8302                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
8303                         PMD_DRV_LOG(INFO,
8304                                 "Symmetric hash has already been disabled");
8305                         return;
8306                 }
8307                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8308         }
8309         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8310         I40E_WRITE_FLUSH(hw);
8311 }
8312
8313 /*
8314  * Get global configurations of hash function type and symmetric hash enable
8315  * per flow type (pctype). Note that the global configuration affects all
8316  * ports on the same NIC.
8317  */
8318 static int
8319 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
8320                                    struct rte_eth_hash_global_conf *g_cfg)
8321 {
8322         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8323         uint32_t reg;
8324         uint16_t i, j;
8325
8326         memset(g_cfg, 0, sizeof(*g_cfg));
8327         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8328         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
8329                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
8330         else
8331                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
8332         PMD_DRV_LOG(DEBUG, "Hash function is %s",
8333                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
8334
8335         /*
8336          * As i40e supports fewer than 64 flow types, only the first 64 bits
8337          * need to be checked.
8338          */
8339         for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8340                 g_cfg->valid_bit_mask[i] = 0ULL;
8341                 g_cfg->sym_hash_enable_mask[i] = 0ULL;
8342         }
8343
8344         g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
8345
8346         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
8347                 if (!adapter->pctypes_tbl[i])
8348                         continue;
8349                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8350                      j < I40E_FILTER_PCTYPE_MAX; j++) {
8351                         if (adapter->pctypes_tbl[i] & (1ULL << j)) {
8352                                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
8353                                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8354                                         g_cfg->sym_hash_enable_mask[0] |=
8355                                                                 (1ULL << i);
8356                                 }
8357                         }
8358                 }
8359         }
8360
8361         return 0;
8362 }
8363
8364 static int
8365 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
8366                               const struct rte_eth_hash_global_conf *g_cfg)
8367 {
8368         uint32_t i;
8369         uint64_t mask0, i40e_mask = adapter->flow_types_mask;
8370
8371         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
8372                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
8373                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
8374                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
8375                                                 g_cfg->hash_func);
8376                 return -EINVAL;
8377         }
8378
8379         /*
8380          * As i40e supports fewer than 64 flow types, only the first 64 bits
8381          * need to be checked.
8382          */
8383         mask0 = g_cfg->valid_bit_mask[0];
8384         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8385                 if (i == 0) {
8386                         /* Check if any unsupported flow type configured */
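                        /* (mask0 | i40e_mask) ^ i40e_mask is non-zero
                         * exactly when mask0 has a bit set outside the
                         * supported mask.
                         */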
8387                         if ((mask0 | i40e_mask) ^ i40e_mask)
8388                                 goto mask_err;
8389                 } else {
8390                         if (g_cfg->valid_bit_mask[i])
8391                                 goto mask_err;
8392                 }
8393         }
8394
8395         return 0;
8396
8397 mask_err:
8398         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
8399
8400         return -EINVAL;
8401 }
8402
8403 /*
8404  * Set global configurations of hash function type and symmetric hash enable
8405  * per flow type (pctype). Note that modifying the global configuration
8406  * affects all ports on the same NIC.
8407  */
8408 static int
8409 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
8410                                    struct rte_eth_hash_global_conf *g_cfg)
8411 {
8412         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8413         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8414         int ret;
8415         uint16_t i, j;
8416         uint32_t reg;
8417         uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
8418
8419         if (pf->support_multi_driver) {
8420                 PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
8421                 return -ENOTSUP;
8422         }
8423
8424         /* Check the input parameters */
8425         ret = i40e_hash_global_config_check(adapter, g_cfg);
8426         if (ret < 0)
8427                 return ret;
8428
8429         /*
8430          * As i40e supports fewer than 64 flow types, only the first 64 bits
8431          * need to be configured.
8432          */
8433         for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
8434                 if (mask0 & (1UL << i)) {
8435                         reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
8436                                         I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
8437
8438                         for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8439                              j < I40E_FILTER_PCTYPE_MAX; j++) {
8440                                 if (adapter->pctypes_tbl[i] & (1ULL << j))
8441                                         i40e_write_global_rx_ctl(hw,
8442                                                           I40E_GLQF_HSYM(j),
8443                                                           reg);
8444                         }
8445                         i40e_global_cfg_warning(I40E_WARNING_HSYM);
8446                 }
8447         }
8448
8449         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8450         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
8451                 /* Toeplitz */
8452                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
8453                         PMD_DRV_LOG(DEBUG,
8454                                 "Hash function already set to Toeplitz");
8455                         goto out;
8456                 }
8457                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
8458         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
8459                 /* Simple XOR */
8460                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
8461                         PMD_DRV_LOG(DEBUG,
8462                                 "Hash function already set to Simple XOR");
8463                         goto out;
8464                 }
8465                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
8466         } else
8467                 /* Use the default, and keep it as it is */
8468                 goto out;
8469
8470         i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
8471         i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
8472
8473 out:
8474         I40E_WRITE_FLUSH(hw);
8475
8476         return 0;
8477 }
8478
8479 /**
8480  * Valid input sets for hash and flow director filters per PCTYPE
8481  */
8482 static uint64_t
8483 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
8484                 enum rte_filter_type filter)
8485 {
8486         uint64_t valid;
8487
8488         static const uint64_t valid_hash_inset_table[] = {
8489                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8490                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8491                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8492                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
8493                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
8494                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8495                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8496                         I40E_INSET_FLEX_PAYLOAD,
8497                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8498                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8499                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8500                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8501                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8502                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8503                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8504                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8505                         I40E_INSET_FLEX_PAYLOAD,
8506                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8507                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8508                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8509                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8510                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8511                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8512                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8513                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8514                         I40E_INSET_FLEX_PAYLOAD,
8515                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8516                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8517                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8518                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8519                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8520                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8521                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8522                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8523                         I40E_INSET_FLEX_PAYLOAD,
8524                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8525                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8526                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8527                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8528                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8529                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8530                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8531                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8532                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8533                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8534                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8535                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8536                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8537                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8538                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8539                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8540                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8541                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8542                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8543                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8544                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8545                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8546                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8547                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8548                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8549                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8550                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
8551                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8552                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8553                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8554                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8555                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8556                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8557                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8558                         I40E_INSET_FLEX_PAYLOAD,
8559                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8560                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8561                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8562                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8563                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8564                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
8565                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
8566                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
8567                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8568                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8569                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8570                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8571                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8572                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8573                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8574                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
8575                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8576                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8577                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8578                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8579                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8580                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8581                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8582                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8583                         I40E_INSET_FLEX_PAYLOAD,
8584                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8585                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8586                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8587                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8588                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8589                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8590                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8591                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8592                         I40E_INSET_FLEX_PAYLOAD,
8593                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8594                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8595                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8596                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8597                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8598                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8599                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8600                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8601                         I40E_INSET_FLEX_PAYLOAD,
8602                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8603                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8604                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8605                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8606                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8607                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8608                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8609                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8610                         I40E_INSET_FLEX_PAYLOAD,
8611                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8612                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8613                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8614                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8615                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8616                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8617                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8618                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
8619                         I40E_INSET_FLEX_PAYLOAD,
8620                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8621                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8622                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8623                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8624                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8625                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8626                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
8627                         I40E_INSET_FLEX_PAYLOAD,
8628                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8629                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8630                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8631                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
8632                         I40E_INSET_FLEX_PAYLOAD,
8633         };
8634
8635         /**
8636          * Flow director supports only fields defined in
8637          * union rte_eth_fdir_flow.
8638          */
8639         static const uint64_t valid_fdir_inset_table[] = {
8640                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8641                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8642                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8643                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8644                 I40E_INSET_IPV4_TTL,
8645                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8646                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8647                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8648                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8649                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8650                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8651                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8652                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8653                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8654                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8655                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8656                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8657                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8658                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8659                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8660                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8661                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8662                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8663                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8664                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8665                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8666                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8667                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8668                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8669                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8670                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8671                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8672                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8673                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8674                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8675                 I40E_INSET_SCTP_VT,
8676                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8677                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8678                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8679                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8680                 I40E_INSET_IPV4_TTL,
8681                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8682                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8683                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8684                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8685                 I40E_INSET_IPV6_HOP_LIMIT,
8686                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8687                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8688                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8689                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8690                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8691                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8692                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8693                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8694                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8695                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8696                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8697                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8698                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8699                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8700                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8701                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8702                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8703                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8704                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8705                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8706                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8707                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8708                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8709                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8710                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8711                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8712                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8713                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8714                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8715                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8716                 I40E_INSET_SCTP_VT,
8717                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8718                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8719                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8720                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8721                 I40E_INSET_IPV6_HOP_LIMIT,
8722                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8723                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8724                 I40E_INSET_LAST_ETHER_TYPE,
8725         };
8726
8727         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8728                 return 0;
8729         if (filter == RTE_ETH_FILTER_HASH)
8730                 valid = valid_hash_inset_table[pctype];
8731         else
8732                 valid = valid_fdir_inset_table[pctype];
8733
8734         return valid;
8735 }
8736
8737 /**
8738  * Validate if the input set is allowed for a specific PCTYPE
8739  */
8740 int
8741 i40e_validate_input_set(enum i40e_filter_pctype pctype,
8742                 enum rte_filter_type filter, uint64_t inset)
8743 {
8744         uint64_t valid;
8745
8746         valid = i40e_get_valid_input_set(pctype, filter);
8747         if (inset & (~valid))
8748                 return -EINVAL;
8749
8750         return 0;
8751 }
8752
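/*
 * A minimal usage sketch (values chosen for illustration): asking for TCP
 * flags on the IPv4/UDP flow director pctype fails, since only addresses,
 * ports, VLAN tags, TOS and TTL are valid there:
 *
 *	if (i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
 *				    RTE_ETH_FILTER_FDIR,
 *				    I40E_INSET_TCP_FLAGS) < 0)
 *		...	(rejected with -EINVAL)
 */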
8753 /* Default input set field combination per pctype */
8754 uint64_t
8755 i40e_get_default_input_set(uint16_t pctype)
8756 {
8757         static const uint64_t default_inset_table[] = {
8758                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8759                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8760                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8761                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8762                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8763                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8764                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8765                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8766                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8767                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8768                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8769                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8770                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8771                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8772                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8773                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8774                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8775                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8776                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8777                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8778                         I40E_INSET_SCTP_VT,
8779                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8780                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8781                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8782                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8783                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8784                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8785                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8786                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8787                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8788                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8789                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8790                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8791                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8792                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8793                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8794                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8795                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8796                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8797                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8798                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8799                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8800                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8801                         I40E_INSET_SCTP_VT,
8802                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8803                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8804                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8805                         I40E_INSET_LAST_ETHER_TYPE,
8806         };
8807
8808         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8809                 return 0;
8810
8811         return default_inset_table[pctype];
8812 }
8813
8814 /**
8815  * Parse the input set from field indexes to logical bit masks
8816  */
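/*
 * For example, field[] = { RTE_ETH_INPUT_SET_L3_SRC_IP4,
 * RTE_ETH_INPUT_SET_L4_UDP_DST_PORT } parses to
 * I40E_INSET_IPV4_SRC | I40E_INSET_DST_PORT.
 */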
8817 static int
8818 i40e_parse_input_set(uint64_t *inset,
8819                      enum i40e_filter_pctype pctype,
8820                      enum rte_eth_input_set_field *field,
8821                      uint16_t size)
8822 {
8823         uint16_t i, j;
8824         int ret = -EINVAL;
8825
8826         static const struct {
8827                 enum rte_eth_input_set_field field;
8828                 uint64_t inset;
8829         } inset_convert_table[] = {
8830                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
8831                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
8832                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
8833                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
8834                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
8835                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
8836                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
8837                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
8838                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
8839                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
8840                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
8841                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
8842                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
8843                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
8844                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
8845                         I40E_INSET_IPV6_NEXT_HDR},
8846                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
8847                         I40E_INSET_IPV6_HOP_LIMIT},
8848                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
8849                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
8850                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
8851                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
8852                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
8853                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
8854                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
8855                         I40E_INSET_SCTP_VT},
8856                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
8857                         I40E_INSET_TUNNEL_DMAC},
8858                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
8859                         I40E_INSET_VLAN_TUNNEL},
8860                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
8861                         I40E_INSET_TUNNEL_ID},
8862                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
8863                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
8864                         I40E_INSET_FLEX_PAYLOAD_W1},
8865                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
8866                         I40E_INSET_FLEX_PAYLOAD_W2},
8867                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
8868                         I40E_INSET_FLEX_PAYLOAD_W3},
8869                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
8870                         I40E_INSET_FLEX_PAYLOAD_W4},
8871                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
8872                         I40E_INSET_FLEX_PAYLOAD_W5},
8873                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
8874                         I40E_INSET_FLEX_PAYLOAD_W6},
8875                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
8876                         I40E_INSET_FLEX_PAYLOAD_W7},
8877                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
8878                         I40E_INSET_FLEX_PAYLOAD_W8},
8879         };
8880
8881         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
8882                 return ret;
8883
8884         /* Only one item is allowed for default or none */
8885         if (size == 1) {
8886                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
8887                         *inset = i40e_get_default_input_set(pctype);
8888                         return 0;
8889                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
8890                         *inset = I40E_INSET_NONE;
8891                         return 0;
8892                 }
8893         }
8894
8895         for (i = 0, *inset = 0; i < size; i++) {
8896                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
8897                         if (field[i] == inset_convert_table[j].field) {
8898                                 *inset |= inset_convert_table[j].inset;
8899                                 break;
8900                         }
8901                 }
8902
8903                 /* An unsupported input set field was given, return immediately */
8904                 if (j == RTE_DIM(inset_convert_table))
8905                         return ret;
8906         }
8907
8908         return 0;
8909 }
8910
8911 /**
8912  * Translate the input set from logical bit masks to register-aware bit
8913  * masks
8914  */
8915 uint64_t
8916 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
8917 {
8918         uint64_t val = 0;
8919         uint16_t i;
8920
8921         struct inset_map {
8922                 uint64_t inset;
8923                 uint64_t inset_reg;
8924         };
8925
8926         static const struct inset_map inset_map_common[] = {
8927                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
8928                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
8929                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
8930                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
8931                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
8932                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
8933                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
8934                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
8935                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
8936                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
8937                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
8938                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
8939                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
8940                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
8941                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
8942                 {I40E_INSET_TUNNEL_DMAC,
8943                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
8944                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
8945                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
8946                 {I40E_INSET_TUNNEL_SRC_PORT,
8947                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
8948                 {I40E_INSET_TUNNEL_DST_PORT,
8949                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
8950                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
8951                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
8952                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
8953                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
8954                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
8955                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
8956                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
8957                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
8958                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
8959         };
8960
8961         /* Some registers are mapped differently on the X722 */
8962         static const struct inset_map inset_map_diff_x722[] = {
8963                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
8964                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
8965                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
8966                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
8967         };
8968
8969         static const struct inset_map inset_map_diff_not_x722[] = {
8970                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
8971                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
8972                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
8973                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
8974         };
8975
8976         if (input == 0)
8977                 return val;
8978
8979         /* Translate input set to register aware inset */
8980         if (type == I40E_MAC_X722) {
8981                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
8982                         if (input & inset_map_diff_x722[i].inset)
8983                                 val |= inset_map_diff_x722[i].inset_reg;
8984                 }
8985         } else {
8986                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
8987                         if (input & inset_map_diff_not_x722[i].inset)
8988                                 val |= inset_map_diff_not_x722[i].inset_reg;
8989                 }
8990         }
8991
8992         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
8993                 if (input & inset_map_common[i].inset)
8994                         val |= inset_map_common[i].inset_reg;
8995         }
8996
8997         return val;
8998 }
8999
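/*
 * Build the list of mask register values required for @inset. Field pairs
 * that hardware matches in full without masking (e.g. proto + ttl set
 * together) are skipped. Returns the number of entries written to @mask,
 * 0 when no masks are needed, or -EINVAL when more than @nb_elem masks
 * would be required.
 */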
9000 int
9001 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9002 {
9003         uint8_t i, idx = 0;
9004         uint64_t inset_need_mask = inset;
9005
9006         static const struct {
9007                 uint64_t inset;
9008                 uint32_t mask;
9009         } inset_mask_map[] = {
9010                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
9011                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
9012                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
9013                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
9014                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
9015                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
9016                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
9017                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
9018         };
9019
9020         if (!inset || !mask || !nb_elem)
9021                 return 0;
9022
9023         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9024                 /* Clear the inset bit when no mask register is required,
9025                  * for example when proto and ttl are set together
9026                  */
9027                 if ((inset & inset_mask_map[i].inset) ==
9028                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
9029                         inset_need_mask &= ~inset_mask_map[i].inset;
9030                 if (!inset_need_mask)
9031                         return 0;
9032         }
9033         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9034                 if ((inset_need_mask & inset_mask_map[i].inset) ==
9035                     inset_mask_map[i].inset) {
9036                         if (idx >= nb_elem) {
9037                                 PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
9038                                 return -EINVAL;
9039                         }
9040                         mask[idx] = inset_mask_map[i].mask;
9041                         idx++;
9042                 }
9043         }
9044
9045         return idx;
9046 }
9047
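/*
 * Write @val to an Rx control register only if it differs from the value
 * currently there, logging the register contents before and after.
 */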
9048 void
9049 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9050 {
9051         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9052
9053         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9054         if (reg != val)
9055                 i40e_write_rx_ctl(hw, addr, val);
9056         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9057                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9058 }
9059
9060 void
9061 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9062 {
9063         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9064
9065         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9066         if (reg != val)
9067                 i40e_write_global_rx_ctl(hw, addr, val);
9068         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9069                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9070 }
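
/*
 * The two helpers above implement a read-compare-write idiom: a register
 * is written only when its current value differs from the requested one,
 * which matters most for global registers shared with other drivers. A
 * 64-bit input set is split across a 32-bit register pair with these
 * helpers, low word in index 0 and high word in index 1. Illustrative
 * sketch only:
 */
#if 0
	uint64_t inset_reg = i40e_translate_input_set_reg(hw->mac.type,
							  input_set);

	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
			     (uint32_t)(inset_reg & UINT32_MAX));
	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
			     (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) &
					UINT32_MAX));
#endif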
9071
9072 static void
9073 i40e_filter_input_set_init(struct i40e_pf *pf)
9074 {
9075         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9076         enum i40e_filter_pctype pctype;
9077         uint64_t input_set, inset_reg;
9078         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9079         int num, i;
9080         uint16_t flow_type;
9081
9082         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9083              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9084                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9085
9086                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9087                         continue;
9088
9089                 input_set = i40e_get_default_input_set(pctype);
9090
9091                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9092                                                    I40E_INSET_MASK_NUM_REG);
9093                 if (num < 0)
9094                         return;
9095                 if (pf->support_multi_driver && num > 0) {
9096                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9097                         return;
9098                 }
9099                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9100                                         input_set);
9101
9102                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9103                                       (uint32_t)(inset_reg & UINT32_MAX));
9104                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9105                                      (uint32_t)((inset_reg >>
9106                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
9107                 if (!pf->support_multi_driver) {
9108                         i40e_check_write_global_reg(hw,
9109                                             I40E_GLQF_HASH_INSET(0, pctype),
9110                                             (uint32_t)(inset_reg & UINT32_MAX));
9111                         i40e_check_write_global_reg(hw,
9112                                              I40E_GLQF_HASH_INSET(1, pctype),
9113                                              (uint32_t)((inset_reg >>
9114                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
9115
9116                         for (i = 0; i < num; i++) {
9117                                 i40e_check_write_global_reg(hw,
9118                                                     I40E_GLQF_FD_MSK(i, pctype),
9119                                                     mask_reg[i]);
9120                                 i40e_check_write_global_reg(hw,
9121                                                   I40E_GLQF_HASH_MSK(i, pctype),
9122                                                   mask_reg[i]);
9123                         }
9124                         /* Clear unused mask registers of the pctype */
9125                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9126                                 i40e_check_write_global_reg(hw,
9127                                                     I40E_GLQF_FD_MSK(i, pctype),
9128                                                     0);
9129                                 i40e_check_write_global_reg(hw,
9130                                                   I40E_GLQF_HASH_MSK(i, pctype),
9131                                                   0);
9132                         }
9133                 } else {
9134                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9135                 }
9136                 I40E_WRITE_FLUSH(hw);
9137
9138                 /* store the default input set */
9139                 if (!pf->support_multi_driver)
9140                         pf->hash_input_set[pctype] = input_set;
9141                 pf->fdir.input_set[pctype] = input_set;
9142         }
9143
9144         if (!pf->support_multi_driver) {
9145                 i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9146                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9147                 i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9148         }
9149 }
9150
9151 int
9152 i40e_hash_filter_inset_select(struct i40e_hw *hw,
9153                          struct rte_eth_input_set_conf *conf)
9154 {
9155         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9156         enum i40e_filter_pctype pctype;
9157         uint64_t input_set, inset_reg = 0;
9158         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9159         int ret, i, num;
9160
9161         if (!conf) {
9162                 PMD_DRV_LOG(ERR, "Invalid pointer");
9163                 return -EFAULT;
9164         }
9165         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9166             conf->op != RTE_ETH_INPUT_SET_ADD) {
9167                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9168                 return -EINVAL;
9169         }
9170
9171         if (pf->support_multi_driver) {
9172                 PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
9173                 return -ENOTSUP;
9174         }
9175
9176         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9177         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9178                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9179                 return -EINVAL;
9180         }
9181
9182         if (hw->mac.type == I40E_MAC_X722) {
9183                 /* get translated pctype value in fd pctype register */
9184                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
9185                         I40E_GLQF_FD_PCTYPES((int)pctype));
9186         }
9187
9188         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9189                                    conf->inset_size);
9190         if (ret) {
9191                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9192                 return -EINVAL;
9193         }
9194
9195         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
9196                 /* get inset value in register */
9197                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9198                 inset_reg <<= I40E_32_BIT_WIDTH;
9199                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9200                 input_set |= pf->hash_input_set[pctype];
9201         }
9202         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9203                                            I40E_INSET_MASK_NUM_REG);
9204         if (num < 0)
9205                 return -EINVAL;
9206
9207         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9208
9209         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9210                                     (uint32_t)(inset_reg & UINT32_MAX));
9211         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9212                                     (uint32_t)((inset_reg >>
9213                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
9214         i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9215
9216         for (i = 0; i < num; i++)
9217                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9218                                             mask_reg[i]);
9219         /* Clear unused mask registers of the pctype */
9220         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9221                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9222                                             0);
9223         i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9224         I40E_WRITE_FLUSH(hw);
9225
9226         pf->hash_input_set[pctype] = input_set;
9227         return 0;
9228 }
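
/*
 * Application-side sketch (illustrative only): the function above is
 * reached through the legacy filter API. The example below would
 * restrict the RSS hash input for IPv4/UDP to the source IP address
 * only; port_id is a placeholder.
 */
#if 0
	struct rte_eth_hash_filter_info info;

	memset(&info, 0, sizeof(info));
	info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
	info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
	info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
	info.info.input_set_conf.inset_size = 1;
	info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
				RTE_ETH_FILTER_SET, &info);
#endif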
9229
9230 int
9231 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
9232                          struct rte_eth_input_set_conf *conf)
9233 {
9234         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9235         enum i40e_filter_pctype pctype;
9236         uint64_t input_set, inset_reg = 0;
9237         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9238         int ret, i, num;
9239
9240         if (!hw || !conf) {
9241                 PMD_DRV_LOG(ERR, "Invalid pointer");
9242                 return -EFAULT;
9243         }
9244         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9245             conf->op != RTE_ETH_INPUT_SET_ADD) {
9246                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9247                 return -EINVAL;
9248         }
9249
9250         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9251
9252         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9253                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9254                 return -EINVAL;
9255         }
9256
9257         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9258                                    conf->inset_size);
9259         if (ret) {
9260                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9261                 return -EINVAL;
9262         }
9263
9264         /* get inset value in register */
9265         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
9266         inset_reg <<= I40E_32_BIT_WIDTH;
9267         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
9268
9269         /* Can not change the inset reg for flex payload for fdir,
9270          * it is done by writing I40E_PRTQF_FD_FLXINSET
9271          * in i40e_set_flex_mask_on_pctype.
9272          */
9273         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
9274                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
9275         else
9276                 input_set |= pf->fdir.input_set[pctype];
9277         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9278                                            I40E_INSET_MASK_NUM_REG);
9279         if (num < 0)
9280                 return -EINVAL;
9281         if (pf->support_multi_driver && num > 0) {
9282                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9283                 return -ENOTSUP;
9284         }
9285
9286         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9287
9288         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9289                               (uint32_t)(inset_reg & UINT32_MAX));
9290         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9291                              (uint32_t)((inset_reg >>
9292                              I40E_32_BIT_WIDTH) & UINT32_MAX));
9293
9294         if (!pf->support_multi_driver) {
9295                 for (i = 0; i < num; i++)
9296                         i40e_check_write_global_reg(hw,
9297                                                     I40E_GLQF_FD_MSK(i, pctype),
9298                                                     mask_reg[i]);
9299                 /* Clear unused mask registers of the pctype */
9300                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9301                         i40e_check_write_global_reg(hw,
9302                                                     I40E_GLQF_FD_MSK(i, pctype),
9303                                                     0);
9304                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9305         } else {
9306                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9307         }
9308         I40E_WRITE_FLUSH(hw);
9309
9310         pf->fdir.input_set[pctype] = input_set;
9311         return 0;
9312 }
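
/*
 * Application-side sketch (illustrative; the exact entry point may
 * differ): flow director input-set selection is typically driven
 * through the legacy filter API with an rte_eth_fdir_filter_info
 * descriptor. port_id is a placeholder.
 */
#if 0
	struct rte_eth_fdir_filter_info info;

	memset(&info, 0, sizeof(info));
	info.info_type = RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT;
	info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_DST_IP4;
	info.info.input_set_conf.inset_size = 1;
	info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				RTE_ETH_FILTER_SET, &info);
#endif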
9313
9314 static int
9315 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9316 {
9317         int ret = 0;
9318
9319         if (!hw || !info) {
9320                 PMD_DRV_LOG(ERR, "Invalid pointer");
9321                 return -EFAULT;
9322         }
9323
9324         switch (info->info_type) {
9325         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9326                 i40e_get_symmetric_hash_enable_per_port(hw,
9327                                         &(info->info.enable));
9328                 break;
9329         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9330                 ret = i40e_get_hash_filter_global_config(hw,
9331                                 &(info->info.global_conf));
9332                 break;
9333         default:
9334                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9335                                                         info->info_type);
9336                 ret = -EINVAL;
9337                 break;
9338         }
9339
9340         return ret;
9341 }
9342
9343 static int
9344 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9345 {
9346         int ret = 0;
9347
9348         if (!hw || !info) {
9349                 PMD_DRV_LOG(ERR, "Invalid pointer");
9350                 return -EFAULT;
9351         }
9352
9353         switch (info->info_type) {
9354         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9355                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
9356                 break;
9357         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9358                 ret = i40e_set_hash_filter_global_config(hw,
9359                                 &(info->info.global_conf));
9360                 break;
9361         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
9362                 ret = i40e_hash_filter_inset_select(hw,
9363                                                &(info->info.input_set_conf));
9364                 break;
9365
9366         default:
9367                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9368                                                         info->info_type);
9369                 ret = -EINVAL;
9370                 break;
9371         }
9372
9373         return ret;
9374 }
9375
9376 /* Operations for hash function */
9377 static int
9378 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
9379                       enum rte_filter_op filter_op,
9380                       void *arg)
9381 {
9382         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9383         int ret = 0;
9384
9385         switch (filter_op) {
9386         case RTE_ETH_FILTER_NOP:
9387                 break;
9388         case RTE_ETH_FILTER_GET:
9389                 ret = i40e_hash_filter_get(hw,
9390                         (struct rte_eth_hash_filter_info *)arg);
9391                 break;
9392         case RTE_ETH_FILTER_SET:
9393                 ret = i40e_hash_filter_set(hw,
9394                         (struct rte_eth_hash_filter_info *)arg);
9395                 break;
9396         default:
9397                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
9398                                                                 filter_op);
9399                 ret = -ENOTSUP;
9400                 break;
9401         }
9402
9403         return ret;
9404 }
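
/*
 * Application-side sketch (illustrative only): enabling symmetric
 * hashing per port goes through the same hash filter control path;
 * port_id is a placeholder.
 */
#if 0
	struct rte_eth_hash_filter_info info;

	memset(&info, 0, sizeof(info));
	info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
	info.info.enable = 1;
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
				RTE_ETH_FILTER_SET, &info);
#endif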
9405
9406 /* Convert ethertype filter structure */
9407 static int
9408 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9409                               struct i40e_ethertype_filter *filter)
9410 {
9411         rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
9412         filter->input.ether_type = input->ether_type;
9413         filter->flags = input->flags;
9414         filter->queue = input->queue;
9415
9416         return 0;
9417 }
9418
9419 /* Check if the ethertype filter already exists in the SW list */
9420 struct i40e_ethertype_filter *
9421 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9422                                 const struct i40e_ethertype_filter_input *input)
9423 {
9424         int ret;
9425
9426         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9427         if (ret < 0)
9428                 return NULL;
9429
9430         return ethertype_rule->hash_map[ret];
9431 }
9432
9433 /* Add ethertype filter in SW list */
9434 static int
9435 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9436                                 struct i40e_ethertype_filter *filter)
9437 {
9438         struct i40e_ethertype_rule *rule = &pf->ethertype;
9439         int ret;
9440
9441         ret = rte_hash_add_key(rule->hash_table, &filter->input);
9442         if (ret < 0) {
9443                 PMD_DRV_LOG(ERR,
9444                             "Failed to insert ethertype filter"
9445                             " into hash table, error %d!",
9446                             ret);
9447                 return ret;
9448         }
9449         rule->hash_map[ret] = filter;
9450
9451         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9452
9453         return 0;
9454 }
9455
9456 /* Delete ethertype filter in SW list */
9457 int
9458 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9459                              struct i40e_ethertype_filter_input *input)
9460 {
9461         struct i40e_ethertype_rule *rule = &pf->ethertype;
9462         struct i40e_ethertype_filter *filter;
9463         int ret;
9464
9465         ret = rte_hash_del_key(rule->hash_table, input);
9466         if (ret < 0) {
9467                 PMD_DRV_LOG(ERR,
9468                             "Failed to delete ethertype filter"
9469                             " from hash table, error %d!",
9470                             ret);
9471                 return ret;
9472         }
9473         filter = rule->hash_map[ret];
9474         rule->hash_map[ret] = NULL;
9475
9476         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9477         rte_free(filter);
9478
9479         return 0;
9480 }
9481
9482 /*
9483  * Configure an ethertype filter, which can direct packets by filtering
9484  * on MAC address and ether_type, or on ether_type only.
9485  */
9486 int
9487 i40e_ethertype_filter_set(struct i40e_pf *pf,
9488                         struct rte_eth_ethertype_filter *filter,
9489                         bool add)
9490 {
9491         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9492         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9493         struct i40e_ethertype_filter *ethertype_filter, *node;
9494         struct i40e_ethertype_filter check_filter;
9495         struct i40e_control_filter_stats stats;
9496         uint16_t flags = 0;
9497         int ret;
9498
9499         if (filter->queue >= pf->dev_data->nb_rx_queues) {
9500                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9501                 return -EINVAL;
9502         }
9503         if (filter->ether_type == ETHER_TYPE_IPv4 ||
9504                 filter->ether_type == ETHER_TYPE_IPv6) {
9505                 PMD_DRV_LOG(ERR,
9506                         "unsupported ether_type(0x%04x) in control packet filter.",
9507                         filter->ether_type);
9508                 return -EINVAL;
9509         }
9510         if (filter->ether_type == ETHER_TYPE_VLAN)
9511                 PMD_DRV_LOG(WARNING,
9512                         "filtering on VLAN ether_type in the first tag is not supported.");
9513
9514         /* Check if there is the filter in SW list */
9515         memset(&check_filter, 0, sizeof(check_filter));
9516         i40e_ethertype_filter_convert(filter, &check_filter);
9517         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9518                                                &check_filter.input);
9519         if (add && node) {
9520                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9521                 return -EINVAL;
9522         }
9523
9524         if (!add && !node) {
9525                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9526                 return -EINVAL;
9527         }
9528
9529         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9530                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9531         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9532                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9533         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9534
9535         memset(&stats, 0, sizeof(stats));
9536         ret = i40e_aq_add_rem_control_packet_filter(hw,
9537                         filter->mac_addr.addr_bytes,
9538                         filter->ether_type, flags,
9539                         pf->main_vsi->seid,
9540                         filter->queue, add, &stats, NULL);
9541
9542         PMD_DRV_LOG(INFO,
9543                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9544                 ret, stats.mac_etype_used, stats.etype_used,
9545                 stats.mac_etype_free, stats.etype_free);
9546         if (ret < 0)
9547                 return -ENOSYS;
9548
9549         /* Add or delete a filter in SW list */
9550         if (add) {
9551                 ethertype_filter = rte_zmalloc("ethertype_filter",
9552                                        sizeof(*ethertype_filter), 0);
9553                 if (ethertype_filter == NULL) {
9554                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9555                         return -ENOMEM;
9556                 }
9557
9558                 rte_memcpy(ethertype_filter, &check_filter,
9559                            sizeof(check_filter));
9560                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9561                 if (ret < 0)
9562                         rte_free(ethertype_filter);
9563         } else {
9564                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9565         }
9566
9567         return ret;
9568 }
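
/*
 * Application-side sketch (illustrative only): adding an ethertype
 * filter that steers all frames of a given ether_type to one RX queue.
 * The ether_type value and queue are arbitrary examples; port_id is a
 * placeholder.
 */
#if 0
	struct rte_eth_ethertype_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.ether_type = 0x88F7;	/* example: PTP over Ethernet */
	filter.flags = 0;		/* match on ether_type only, ignore MAC */
	filter.queue = 1;
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
				RTE_ETH_FILTER_ADD, &filter);
#endif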
9569
9570 /*
9571  * Handle operations for ethertype filter.
9572  */
9573 static int
9574 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
9575                                 enum rte_filter_op filter_op,
9576                                 void *arg)
9577 {
9578         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9579         int ret = 0;
9580
9581         if (filter_op == RTE_ETH_FILTER_NOP)
9582                 return ret;
9583
9584         if (arg == NULL) {
9585                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
9586                             filter_op);
9587                 return -EINVAL;
9588         }
9589
9590         switch (filter_op) {
9591         case RTE_ETH_FILTER_ADD:
9592                 ret = i40e_ethertype_filter_set(pf,
9593                         (struct rte_eth_ethertype_filter *)arg,
9594                         TRUE);
9595                 break;
9596         case RTE_ETH_FILTER_DELETE:
9597                 ret = i40e_ethertype_filter_set(pf,
9598                         (struct rte_eth_ethertype_filter *)arg,
9599                         FALSE);
9600                 break;
9601         default:
9602                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
9603                 ret = -ENOSYS;
9604                 break;
9605         }
9606         return ret;
9607 }
9608
9609 static int
9610 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
9611                      enum rte_filter_type filter_type,
9612                      enum rte_filter_op filter_op,
9613                      void *arg)
9614 {
9615         int ret = 0;
9616
9617         if (dev == NULL)
9618                 return -EINVAL;
9619
9620         switch (filter_type) {
9621         case RTE_ETH_FILTER_NONE:
9622                 /* For global configuration */
9623                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
9624                 break;
9625         case RTE_ETH_FILTER_HASH:
9626                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
9627                 break;
9628         case RTE_ETH_FILTER_MACVLAN:
9629                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
9630                 break;
9631         case RTE_ETH_FILTER_ETHERTYPE:
9632                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
9633                 break;
9634         case RTE_ETH_FILTER_TUNNEL:
9635                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
9636                 break;
9637         case RTE_ETH_FILTER_FDIR:
9638                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
9639                 break;
9640         case RTE_ETH_FILTER_GENERIC:
9641                 if (filter_op != RTE_ETH_FILTER_GET)
9642                         return -EINVAL;
9643                 *(const void **)arg = &i40e_flow_ops;
9644                 break;
9645         default:
9646                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9647                                                         filter_type);
9648                 ret = -EINVAL;
9649                 break;
9650         }
9651
9652         return ret;
9653 }
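
/*
 * Sketch (illustrative only): the RTE_ETH_FILTER_GENERIC branch above
 * is how the rte_flow layer obtains the driver's flow ops; applications
 * do not call it directly but go through rte_flow_create() and friends.
 */
#if 0
	const struct rte_flow_ops *ops = NULL;

	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				RTE_ETH_FILTER_GET, &ops);
	/* ops now points at i40e_flow_ops */
#endif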
9654
9655 /*
9656  * Check and enable Extended Tag.
9657  * Enabling Extended Tag is important for 40G performance.
9658  */
9659 static void
9660 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9661 {
9662         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9663         uint32_t buf = 0;
9664         int ret;
9665
9666         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9667                                       PCI_DEV_CAP_REG);
9668         if (ret < 0) {
9669                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9670                             PCI_DEV_CAP_REG);
9671                 return;
9672         }
9673         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9674                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9675                 return;
9676         }
9677
9678         buf = 0;
9679         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9680                                       PCI_DEV_CTRL_REG);
9681         if (ret < 0) {
9682                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9683                             PCI_DEV_CTRL_REG);
9684                 return;
9685         }
9686         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9687                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9688                 return;
9689         }
9690         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9691         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9692                                        PCI_DEV_CTRL_REG);
9693         if (ret < 0) {
9694                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9695                             PCI_DEV_CTRL_REG);
9696                 return;
9697         }
9698 }
9699
9700 /*
9701  * As some registers are only reset by a global hardware reset,
9702  * hardware initialization is needed to put those registers into an
9703  * expected initial state.
9704  */
9705 static void
9706 i40e_hw_init(struct rte_eth_dev *dev)
9707 {
9708         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9709
9710         i40e_enable_extended_tag(dev);
9711
9712         /* clear the PF Queue Filter control register */
9713         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9714
9715         /* Disable symmetric hash per port */
9716         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9717 }
9718
9719 /*
9720  * For X722 it is possible to have multiple pctypes mapped to the same
9721  * flowtype; however, this function returns only the highest pctype index,
9722  * which is not quite correct. This is a known problem of the i40e driver
9723  * and needs to be fixed later.
9724  */
9725 enum i40e_filter_pctype
9726 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9727 {
9728         int i;
9729         uint64_t pctype_mask;
9730
9731         if (flow_type < I40E_FLOW_TYPE_MAX) {
9732                 pctype_mask = adapter->pctypes_tbl[flow_type];
9733                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9734                         if (pctype_mask & (1ULL << i))
9735                                 return (enum i40e_filter_pctype)i;
9736                 }
9737         }
9738         return I40E_FILTER_PCTYPE_INVALID;
9739 }
9740
9741 uint16_t
9742 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9743                         enum i40e_filter_pctype pctype)
9744 {
9745         uint16_t flowtype;
9746         uint64_t pctype_mask = 1ULL << pctype;
9747
9748         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9749              flowtype++) {
9750                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9751                         return flowtype;
9752         }
9753
9754         return RTE_ETH_FLOW_UNKNOWN;
9755 }
9756
9757 /*
9758  * On X710, performance numbers fall far short of expectations on recent
9759  * firmware versions. On XL710, the same holds on recent firmware versions
9760  * if promiscuous mode is disabled, or if promiscuous mode is enabled and
9761  * the port MAC address equals the packet destination MAC address. The fix
9762  * for this issue may not be integrated into upcoming firmware versions,
9763  * so a workaround in the software driver is needed: modify the initial
9764  * values of three internal-only registers for both X710 and XL710. Note
9765  * that the values for X710 and XL710 may differ; the workaround can be
9766  * removed once the issue is fixed in firmware.
9767  */
9768
9769 /* For both X710 and XL710 */
9770 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
9771 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
9772 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
9773
9774 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9775 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
9776
9777 /* For X722 */
9778 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
9779 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
9780
9781 /* For X710 */
9782 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
9783 /* For XL710 */
9784 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
9785 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
9786
9787 static int
9788 i40e_dev_sync_phy_type(struct i40e_hw *hw)
9789 {
9790         enum i40e_status_code status;
9791         struct i40e_aq_get_phy_abilities_resp phy_ab;
9792         int ret = -ENOTSUP;
9793         int retries = 0;
9794
9795         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
9796                                               NULL);
9797
9798         while (status) {
9799                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
9800                         status);
9801                 retries++;
9802                 rte_delay_us(100000);
9803                 if (retries < 5)
9804                         status = i40e_aq_get_phy_capabilities(hw, false,
9805                                         true, &phy_ab, NULL);
9806                 else
9807                         return ret;
9808         }
9809         return 0;
9810 }
9811
9812 static void
9813 i40e_configure_registers(struct i40e_hw *hw)
9814 {
9815         static struct {
9816                 uint32_t addr;
9817                 uint64_t val;
9818         } reg_table[] = {
9819                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
9820                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
9821                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
9822         };
9823         uint64_t reg;
9824         uint32_t i;
9825         int ret;
9826
9827         for (i = 0; i < RTE_DIM(reg_table); i++) {
9828                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
9829                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9830                                 reg_table[i].val =
9831                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
9832                         else /* For X710/XL710/XXV710 */
9833                                 if (hw->aq.fw_maj_ver < 6)
9834                                         reg_table[i].val =
9835                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
9836                                 else
9837                                         reg_table[i].val =
9838                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
9839                 }
9840
9841                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
9842                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9843                                 reg_table[i].val =
9844                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9845                         else /* For X710/XL710/XXV710 */
9846                                 reg_table[i].val =
9847                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9848                 }
9849
9850                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
9851                         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
9852                             I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
9853                                 reg_table[i].val =
9854                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
9855                         else /* For X710 */
9856                                 reg_table[i].val =
9857                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
9858                 }
9859
9860                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
9861                                                         &reg, NULL);
9862                 if (ret < 0) {
9863                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
9864                                                         reg_table[i].addr);
9865                         break;
9866                 }
9867                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
9868                                                 reg_table[i].addr, reg);
9869                 if (reg == reg_table[i].val)
9870                         continue;
9871
9872                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
9873                                                 reg_table[i].val, NULL);
9874                 if (ret < 0) {
9875                         PMD_DRV_LOG(ERR,
9876                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
9877                                 reg_table[i].val, reg_table[i].addr);
9878                         break;
9879                 }
9880                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
9881                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
9882         }
9883 }
9884
9885 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
9886 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
9887 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
9888 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
9889 static int
9890 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
9891 {
9892         uint32_t reg;
9893         int ret;
9894
9895         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
9896                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
9897                 return -EINVAL;
9898         }
9899
9900         /* Configure for double VLAN RX stripping */
9901         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
9902         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
9903                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
9904                 ret = i40e_aq_debug_write_register(hw,
9905                                                    I40E_VSI_TSR(vsi->vsi_id),
9906                                                    reg, NULL);
9907                 if (ret < 0) {
9908                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
9909                                     vsi->vsi_id);
9910                         return I40E_ERR_CONFIG;
9911                 }
9912         }
9913
9914         /* Configure for double VLAN TX insertion */
9915         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
9916         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
9917                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
9918                 ret = i40e_aq_debug_write_register(hw,
9919                                                    I40E_VSI_L2TAGSTXVALID(
9920                                                    vsi->vsi_id), reg, NULL);
9921                 if (ret < 0) {
9922                         PMD_DRV_LOG(ERR,
9923                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
9924                                 vsi->vsi_id);
9925                         return I40E_ERR_CONFIG;
9926                 }
9927         }
9928
9929         return 0;
9930 }
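
/*
 * Application-side sketch (illustrative only): the QinQ configuration
 * above takes effect when extended VLAN offload is enabled on the port;
 * port_id is a placeholder.
 */
#if 0
	int vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
	rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
#endif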
9931
9932 /**
9933  * i40e_aq_add_mirror_rule
9934  * @hw: pointer to the hardware structure
9935  * @seid: VEB seid to add mirror rule to
9936  * @dst_id: destination vsi seid
 * @rule_type: type of the mirror rule to be added
9937  * @entries: buffer which contains the entities to be mirrored
9938  * @count: number of entities contained in the buffer
9939  * @rule_id: the rule_id of the rule to be added
9940  *
9941  * Add a mirror rule for a given VEB.
9942  *
9943  **/
9944 static enum i40e_status_code
9945 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
9946                         uint16_t seid, uint16_t dst_id,
9947                         uint16_t rule_type, uint16_t *entries,
9948                         uint16_t count, uint16_t *rule_id)
9949 {
9950         struct i40e_aq_desc desc;
9951         struct i40e_aqc_add_delete_mirror_rule cmd;
9952         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
9953                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
9954                 &desc.params.raw;
9955         uint16_t buff_len;
9956         enum i40e_status_code status;
9957
9958         i40e_fill_default_direct_cmd_desc(&desc,
9959                                           i40e_aqc_opc_add_mirror_rule);
9960         memset(&cmd, 0, sizeof(cmd));
9961
9962         buff_len = sizeof(uint16_t) * count;
9963         desc.datalen = rte_cpu_to_le_16(buff_len);
9964         if (buff_len > 0)
9965                 desc.flags |= rte_cpu_to_le_16(
9966                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
9967         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
9968                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
9969         cmd.num_entries = rte_cpu_to_le_16(count);
9970         cmd.seid = rte_cpu_to_le_16(seid);
9971         cmd.destination = rte_cpu_to_le_16(dst_id);
9972
9973         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
9974         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
9975         PMD_DRV_LOG(INFO,
9976                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
9977                 hw->aq.asq_last_status, resp->rule_id,
9978                 resp->mirror_rules_used, resp->mirror_rules_free);
9979         *rule_id = rte_le_to_cpu_16(resp->rule_id);
9980
9981         return status;
9982 }
9983
9984 /**
9985  * i40e_aq_del_mirror_rule
9986  * @hw: pointer to the hardware structure
9987  * @seid: VEB seid to delete the mirror rule from
 * @rule_type: type of the mirror rule to be deleted
9988  * @entries: buffer which contains the entities to be mirrored
9989  * @count: number of entities contained in the buffer
9990  * @rule_id: the rule_id of the rule to be deleted
9991  *
9992  * Delete a mirror rule for a given veb.
9993  *
9994  **/
9995 static enum i40e_status_code
9996 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
9997                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
9998                 uint16_t count, uint16_t rule_id)
9999 {
10000         struct i40e_aq_desc desc;
10001         struct i40e_aqc_add_delete_mirror_rule cmd;
10002         uint16_t buff_len = 0;
10003         enum i40e_status_code status;
10004         void *buff = NULL;
10005
10006         i40e_fill_default_direct_cmd_desc(&desc,
10007                                           i40e_aqc_opc_delete_mirror_rule);
10008         memset(&cmd, 0, sizeof(cmd));
10009         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10010                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10011                                                           I40E_AQ_FLAG_RD));
10012                 cmd.num_entries = count;
10013                 buff_len = sizeof(uint16_t) * count;
10014                 desc.datalen = rte_cpu_to_le_16(buff_len);
10015                 buff = (void *)entries;
10016         } else
10017                 /* rule id is filled into the destination field when deleting a mirror rule */
10018                 cmd.destination = rte_cpu_to_le_16(rule_id);
10019
10020         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10021                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10022         cmd.seid = rte_cpu_to_le_16(seid);
10023
10024         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10025         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10026
10027         return status;
10028 }
10029
10030 /**
10031  * i40e_mirror_rule_set
10032  * @dev: pointer to the device structure
10033  * @mirror_conf: mirror rule info
10034  * @sw_id: mirror rule's sw_id
10035  * @on: enable/disable
10036  *
10037  * Set a mirror rule.
10038  *
10039  **/
10040 static int
10041 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10042                         struct rte_eth_mirror_conf *mirror_conf,
10043                         uint8_t sw_id, uint8_t on)
10044 {
10045         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10046         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10047         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10048         struct i40e_mirror_rule *parent = NULL;
10049         uint16_t seid, dst_seid, rule_id;
10050         uint16_t i, j = 0;
10051         int ret;
10052
10053         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10054
10055         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10056                 PMD_DRV_LOG(ERR,
10057                         "mirror rule cannot be configured without VEB or VFs.");
10058                 return -ENOSYS;
10059         }
10060         if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
10061                 PMD_DRV_LOG(ERR, "mirror table is full.");
10062                 return -ENOSPC;
10063         }
10064         if (mirror_conf->dst_pool > pf->vf_num) {
10065                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10066                                  mirror_conf->dst_pool);
10067                 return -EINVAL;
10068         }
10069
10070         seid = pf->main_vsi->veb->seid;
10071
10072         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10073                 if (sw_id <= it->index) {
10074                         mirr_rule = it;
10075                         break;
10076                 }
10077                 parent = it;
10078         }
10079         if (mirr_rule && sw_id == mirr_rule->index) {
10080                 if (on) {
10081                         PMD_DRV_LOG(ERR, "mirror rule exists.");
10082                         return -EEXIST;
10083                 } else {
10084                         ret = i40e_aq_del_mirror_rule(hw, seid,
10085                                         mirr_rule->rule_type,
10086                                         mirr_rule->entries,
10087                                         mirr_rule->num_entries, mirr_rule->id);
10088                         if (ret < 0) {
10089                                 PMD_DRV_LOG(ERR,
10090                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
10091                                         ret, hw->aq.asq_last_status);
10092                                 return -ENOSYS;
10093                         }
10094                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10095                         rte_free(mirr_rule);
10096                         pf->nb_mirror_rule--;
10097                         return 0;
10098                 }
10099         } else if (!on) {
10100                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10101                 return -ENOENT;
10102         }
10103
10104         mirr_rule = rte_zmalloc("i40e_mirror_rule",
10105                                 sizeof(struct i40e_mirror_rule), 0);
10106         if (!mirr_rule) {
10107                 PMD_DRV_LOG(ERR, "failed to allocate memory");
10108                 return I40E_ERR_NO_MEMORY;
10109         }
10110         switch (mirror_conf->rule_type) {
10111         case ETH_MIRROR_VLAN:
10112                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10113                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10114                                 mirr_rule->entries[j] =
10115                                         mirror_conf->vlan.vlan_id[i];
10116                                 j++;
10117                         }
10118                 }
10119                 if (j == 0) {
10120                         PMD_DRV_LOG(ERR, "vlan is not specified.");
10121                         rte_free(mirr_rule);
10122                         return -EINVAL;
10123                 }
10124                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10125                 break;
10126         case ETH_MIRROR_VIRTUAL_POOL_UP:
10127         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10128                 /* check if the specified pool bit is out of range */
10129                 if (mirror_conf->pool_mask >= (uint64_t)(1ULL << (pf->vf_num + 1))) {
10130                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
10131                         rte_free(mirr_rule);
10132                         return -EINVAL;
10133                 }
10134                 for (i = 0, j = 0; i < pf->vf_num; i++) {
10135                         if (mirror_conf->pool_mask & (1ULL << i)) {
10136                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10137                                 j++;
10138                         }
10139                 }
10140                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10141                         /* add pf vsi to entries */
10142                         mirr_rule->entries[j] = pf->main_vsi_seid;
10143                         j++;
10144                 }
10145                 if (j == 0) {
10146                         PMD_DRV_LOG(ERR, "pool is not specified.");
10147                         rte_free(mirr_rule);
10148                         return -EINVAL;
10149                 }
10150                 /* Egress and ingress in AQ commands mean from the switch, not the port */
10151                 mirr_rule->rule_type =
10152                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10153                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10154                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10155                 break;
10156         case ETH_MIRROR_UPLINK_PORT:
10157                 /* Egress and ingress in AQ commands mean from the switch, not the port */
10158                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10159                 break;
10160         case ETH_MIRROR_DOWNLINK_PORT:
10161                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10162                 break;
10163         default:
10164                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10165                         mirror_conf->rule_type);
10166                 rte_free(mirr_rule);
10167                 return -EINVAL;
10168         }
10169
10170         /* If the dst_pool is equal to vf_num, consider it as PF */
10171         if (mirror_conf->dst_pool == pf->vf_num)
10172                 dst_seid = pf->main_vsi_seid;
10173         else
10174                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10175
10176         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10177                                       mirr_rule->rule_type, mirr_rule->entries,
10178                                       j, &rule_id);
10179         if (ret < 0) {
10180                 PMD_DRV_LOG(ERR,
10181                         "failed to add mirror rule: ret = %d, aq_err = %d.",
10182                         ret, hw->aq.asq_last_status);
10183                 rte_free(mirr_rule);
10184                 return -ENOSYS;
10185         }
10186
10187         mirr_rule->index = sw_id;
10188         mirr_rule->num_entries = j;
10189         mirr_rule->id = rule_id;
10190         mirr_rule->dst_vsi_seid = dst_seid;
10191
10192         if (parent)
10193                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10194         else
10195                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10196
10197         pf->nb_mirror_rule++;
10198         return 0;
10199 }
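
/*
 * Application-side sketch (illustrative only): mirroring all traffic of
 * one VLAN into a VF pool via the ethdev mirror API. The VLAN ID, pool
 * and rule index are arbitrary examples; port_id is a placeholder.
 */
#if 0
	struct rte_eth_mirror_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rule_type = ETH_MIRROR_VLAN;
	conf.dst_pool = 0;			/* mirror into VF 0's pool */
	conf.vlan.vlan_mask = 1ULL << 0;	/* one VLAN entry is valid */
	conf.vlan.vlan_id[0] = 100;
	rte_eth_mirror_rule_set(port_id, &conf, 0 /* sw rule id */, 1 /* on */);
	/* rte_eth_mirror_rule_reset(port_id, 0) would tear it down again */
#endif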
10200
10201 /**
10202  * i40e_mirror_rule_reset
10203  * @dev: pointer to the device
10204  * @sw_id: mirror rule's sw_id
10205  *
10206  * Reset a mirror rule.
10207  *
10208  **/
10209 static int
10210 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10211 {
10212         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10213         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10214         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10215         uint16_t seid;
10216         int ret;
10217
10218         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10219
10220         seid = pf->main_vsi->veb->seid;
10221
10222         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10223                 if (sw_id == it->index) {
10224                         mirr_rule = it;
10225                         break;
10226                 }
10227         }
10228         if (mirr_rule) {
10229                 ret = i40e_aq_del_mirror_rule(hw, seid,
10230                                 mirr_rule->rule_type,
10231                                 mirr_rule->entries,
10232                                 mirr_rule->num_entries, mirr_rule->id);
10233                 if (ret < 0) {
10234                         PMD_DRV_LOG(ERR,
10235                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
10236                                 ret, hw->aq.asq_last_status);
10237                         return -ENOSYS;
10238                 }
10239                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10240                 rte_free(mirr_rule);
10241                 pf->nb_mirror_rule--;
10242         } else {
10243                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10244                 return -ENOENT;
10245         }
10246         return 0;
10247 }
10248
10249 static uint64_t
10250 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10251 {
10252         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10253         uint64_t systim_cycles;
10254
10255         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10256         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10257                         << 32;
10258
10259         return systim_cycles;
10260 }
10261
10262 static uint64_t
10263 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10264 {
10265         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10266         uint64_t rx_tstamp;
10267
10268         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10269         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10270                         << 32;
10271
10272         return rx_tstamp;
10273 }
10274
10275 static uint64_t
10276 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10277 {
10278         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10279         uint64_t tx_tstamp;
10280
10281         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10282         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10283                         << 32;
10284
10285         return tx_tstamp;
10286 }
10287
10288 static void
10289 i40e_start_timecounters(struct rte_eth_dev *dev)
10290 {
10291         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10292         struct i40e_adapter *adapter =
10293                         (struct i40e_adapter *)dev->data->dev_private;
10294         struct rte_eth_link link;
10295         uint32_t tsync_inc_l;
10296         uint32_t tsync_inc_h;
10297
10298         /* Get current link speed. */
10299         memset(&link, 0, sizeof(link));
10300         i40e_dev_link_update(dev, 1);
10301         rte_i40e_dev_atomic_read_link_status(dev, &link);
10302
10303         switch (link.link_speed) {
10304         case ETH_SPEED_NUM_40G:
10305                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10306                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10307                 break;
10308         case ETH_SPEED_NUM_10G:
10309                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10310                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10311                 break;
10312         case ETH_SPEED_NUM_1G:
10313                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10314                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10315                 break;
10316         default:
10317                 tsync_inc_l = 0x0;
10318                 tsync_inc_h = 0x0;
10319         }
10320
10321         /* Set the timesync increment value. */
10322         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10323         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10324
10325         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10326         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10327         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10328
10329         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10330         adapter->systime_tc.cc_shift = 0;
10331         adapter->systime_tc.nsec_mask = 0;
10332
10333         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10334         adapter->rx_tstamp_tc.cc_shift = 0;
10335         adapter->rx_tstamp_tc.nsec_mask = 0;
10336
10337         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10338         adapter->tx_tstamp_tc.cc_shift = 0;
10339         adapter->tx_tstamp_tc.nsec_mask = 0;
10340 }
10341
10342 static int
10343 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10344 {
10345         struct i40e_adapter *adapter =
10346                         (struct i40e_adapter *)dev->data->dev_private;
10347
10348         adapter->systime_tc.nsec += delta;
10349         adapter->rx_tstamp_tc.nsec += delta;
10350         adapter->tx_tstamp_tc.nsec += delta;
10351
10352         return 0;
10353 }
10354
10355 static int
10356 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10357 {
10358         uint64_t ns;
10359         struct i40e_adapter *adapter =
10360                         (struct i40e_adapter *)dev->data->dev_private;
10361
10362         ns = rte_timespec_to_ns(ts);
10363
10364         /* Set the timecounters to a new value. */
10365         adapter->systime_tc.nsec = ns;
10366         adapter->rx_tstamp_tc.nsec = ns;
10367         adapter->tx_tstamp_tc.nsec = ns;
10368
10369         return 0;
10370 }
10371
10372 static int
10373 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10374 {
10375         uint64_t ns, systime_cycles;
10376         struct i40e_adapter *adapter =
10377                         (struct i40e_adapter *)dev->data->dev_private;
10378
10379         systime_cycles = i40e_read_systime_cyclecounter(dev);
10380         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10381         *ts = rte_ns_to_timespec(ns);
10382
10383         return 0;
10384 }
10385
10386 static int
10387 i40e_timesync_enable(struct rte_eth_dev *dev)
10388 {
10389         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10390         uint32_t tsync_ctl_l;
10391         uint32_t tsync_ctl_h;
10392
10393         /* Stop the timesync system time. */
10394         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10395         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10396         /* Reset the timesync system time value. */
10397         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10398         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10399
10400         i40e_start_timecounters(dev);
10401
10402         /* Clear stale timestamps by reading the latch registers. */
10403         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10404         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10405         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10406         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10407         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10408         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10409
10410         /* Enable timestamping of PTP packets. */
10411         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10412         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10413
10414         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10415         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10416         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10417
10418         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10419         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10420
10421         return 0;
10422 }
10423
10424 static int
10425 i40e_timesync_disable(struct rte_eth_dev *dev)
10426 {
10427         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10428         uint32_t tsync_ctl_l;
10429         uint32_t tsync_ctl_h;
10430
10431         /* Disable timestamping of received and transmitted PTP packets. */
10432         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10433         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10434
10435         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10436         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10437
10438         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10439         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10440
10441         /* Reset the timesync increment value. */
10442         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10443         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10444
10445         return 0;
10446 }
10447
10448 static int
10449 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10450                                 struct timespec *timestamp, uint32_t flags)
10451 {
10452         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10453         struct i40e_adapter *adapter =
10454                 (struct i40e_adapter *)dev->data->dev_private;
10455
10456         uint32_t sync_status;
10457         uint32_t index = flags & 0x03;
10458         uint64_t rx_tstamp_cycles;
10459         uint64_t ns;
10460
10461         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10462         if ((sync_status & (1 << index)) == 0)
10463                 return -EINVAL;
10464
10465         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10466         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10467         *timestamp = rte_ns_to_timespec(ns);
10468
10469         return 0;
10470 }
10471
10472 static int
10473 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10474                                 struct timespec *timestamp)
10475 {
10476         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10477         struct i40e_adapter *adapter =
10478                 (struct i40e_adapter *)dev->data->dev_private;
10479
10480         uint32_t sync_status;
10481         uint64_t tx_tstamp_cycles;
10482         uint64_t ns;
10483
10484         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10485         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10486                 return -EINVAL;
10487
10488         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10489         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10490         *timestamp = rte_ns_to_timespec(ns);
10491
10492         return 0;
10493 }
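
/*
 * Application-side view of the timesync callbacks above, as a hedged
 * sketch using the generic ethdev entry points (assumes a valid, started
 * port_id carrying PTP traffic). The flags argument to the Rx read
 * selects the latch index (0..3); the Tx read only succeeds after a
 * packet was sent with the PKT_TX_IEEE1588_TMST offload flag, and
 * adjust_time slews the clock by a signed nanosecond delta:
 *
 *     struct timespec ts;
 *
 *     rte_eth_timesync_enable(port_id);
 *     if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *             printf("rx: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *     if (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == 0)
 *             printf("tx: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *     rte_eth_timesync_adjust_time(port_id, -1000);
 */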
10494
10495 /*
10496  * i40e_parse_dcb_configure - parse the DCB configuration from the user
10497  * @dev: the device being configured
10498  * @dcb_cfg: pointer to the parsed result
10499  * @tc_map: bit map of enabled traffic classes
10500  *
10501  * Returns 0 on success, negative value on failure
10502  */
10503 static int
10504 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10505                          struct i40e_dcbx_config *dcb_cfg,
10506                          uint8_t *tc_map)
10507 {
10508         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10509         uint8_t i, tc_bw, bw_lf;
10510
10511         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10512
10513         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10514         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10515                 PMD_INIT_LOG(ERR, "number of TCs exceeds the maximum.");
10516                 return -EINVAL;
10517         }
10518
10519         /* assume each tc has the same bw */
10520         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10521         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10522                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10523         /* to ensure the sum of tcbw is equal to 100 */
10524         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10525         for (i = 0; i < bw_lf; i++)
10526                 dcb_cfg->etscfg.tcbwtable[i]++;
10527
10528         /* assume each tc has the same Transmission Selection Algorithm */
10529         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10530                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10531
10532         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10533                 dcb_cfg->etscfg.prioritytable[i] =
10534                                 dcb_rx_conf->dcb_tc[i];
10535
10536         /* FW needs one App to configure HW */
10537         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10538         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10539         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10540         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10541
10542         if (dcb_rx_conf->nb_tcs == 0)
10543                 *tc_map = 1; /* tc0 only */
10544         else
10545                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10546
10547         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10548                 dcb_cfg->pfc.willing = 0;
10549                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10550                 dcb_cfg->pfc.pfcenable = *tc_map;
10551         }
10552         return 0;
10553 }
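
/*
 * Worked example of the bandwidth split above: with nb_tcs = 3 the base
 * share is tc_bw = 100 / 3 = 33 and the leftover is bw_lf = 100 % 3 = 1,
 * so the first TC is bumped once and tcbwtable becomes {34, 33, 33},
 * summing to exactly 100. With nb_tcs = 4 the split is an even
 * {25, 25, 25, 25} with nothing left over.
 */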
10554
10555
10556 static enum i40e_status_code
10557 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10558                               struct i40e_aqc_vsi_properties_data *info,
10559                               uint8_t enabled_tcmap)
10560 {
10561         enum i40e_status_code ret;
10562         int i, total_tc = 0;
10563         uint16_t qpnum_per_tc, bsf, qp_idx;
10564         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10565         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10566         uint16_t used_queues;
10567
10568         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10569         if (ret != I40E_SUCCESS)
10570                 return ret;
10571
10572         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10573                 if (enabled_tcmap & (1 << i))
10574                         total_tc++;
10575         }
10576         if (total_tc == 0)
10577                 total_tc = 1;
10578         vsi->enabled_tc = enabled_tcmap;
10579
10580         /* Different VSI types have different numbers of queues assigned */
10581         if (vsi->type == I40E_VSI_MAIN)
10582                 used_queues = dev_data->nb_rx_queues -
10583                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10584         else if (vsi->type == I40E_VSI_VMDQ2)
10585                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10586         else {
10587                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10588                 return I40E_ERR_NO_AVAILABLE_VSI;
10589         }
10590
10591         /* Number of queues per enabled TC */
10592         qpnum_per_tc = used_queues / total_tc;
10593         if (qpnum_per_tc == 0) {
10594                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
10595                 return I40E_ERR_INVALID_QP_ID;
10596         }
10597         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10598                                 I40E_MAX_Q_PER_TC);
10599         bsf = rte_bsf32(qpnum_per_tc);
10600
10601         /**
10602          * Configure TC and queue mapping parameters. For each enabled TC,
10603          * allocate qpnum_per_tc queues to it; disabled TCs are served by
10604          * the default queue.
10605          */
10606         qp_idx = 0;
10607         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10608                 if (vsi->enabled_tc & (1 << i)) {
10609                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10610                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10611                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10612                         qp_idx += qpnum_per_tc;
10613                 } else
10614                         info->tc_mapping[i] = 0;
10615         }
10616
10617         /* Associate the queue numbers with the VSI; keep vsi->nb_qps unchanged */
10618         if (vsi->type == I40E_VSI_SRIOV) {
10619                 info->mapping_flags |=
10620                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10621                 for (i = 0; i < vsi->nb_qps; i++)
10622                         info->queue_mapping[i] =
10623                                 rte_cpu_to_le_16(vsi->base_queue + i);
10624         } else {
10625                 info->mapping_flags |=
10626                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10627                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10628         }
10629         info->valid_sections |=
10630                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10631
10632         return I40E_SUCCESS;
10633 }
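
/*
 * Encoding example for the tc_mapping words built above (a sketch using
 * the same adminq macros): with used_queues = 8 and two enabled TCs,
 * qpnum_per_tc = 4 and bsf = rte_bsf32(4) = 2, so TC0 starts at queue 0
 * and TC1 at queue 4, each advertising 2^2 = 4 queue pairs:
 *
 *     info->tc_mapping[0] = rte_cpu_to_le_16(
 *             (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *             (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
 *     info->tc_mapping[1] = rte_cpu_to_le_16(
 *             (4 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *             (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
 */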
10634
10635 /*
10636  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10637  * @veb: VEB to be configured
10638  * @tc_map: enabled TC bitmap
10639  *
10640  * Returns 0 on success, negative value on failure
10641  */
10642 static enum i40e_status_code
10643 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10644 {
10645         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10646         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10647         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10648         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10649         enum i40e_status_code ret = I40E_SUCCESS;
10650         int i;
10651         uint32_t bw_max;
10652
10653         /* Nothing to do if the requested TC map matches the enabled one */
10654         if (veb->enabled_tc == tc_map)
10655                 return ret;
10656
10657         /* configure tc bandwidth */
10658         memset(&veb_bw, 0, sizeof(veb_bw));
10659         veb_bw.tc_valid_bits = tc_map;
10660         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10661         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10662                 if (tc_map & BIT_ULL(i))
10663                         veb_bw.tc_bw_share_credits[i] = 1;
10664         }
10665         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10666                                                    &veb_bw, NULL);
10667         if (ret) {
10668                 PMD_INIT_LOG(ERR,
10669                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10670                         hw->aq.asq_last_status);
10671                 return ret;
10672         }
10673
10674         memset(&ets_query, 0, sizeof(ets_query));
10675         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10676                                                    &ets_query, NULL);
10677         if (ret != I40E_SUCCESS) {
10678                 PMD_DRV_LOG(ERR,
10679                         "Failed to get switch_comp ETS configuration %u",
10680                         hw->aq.asq_last_status);
10681                 return ret;
10682         }
10683         memset(&bw_query, 0, sizeof(bw_query));
10684         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10685                                                   &bw_query, NULL);
10686         if (ret != I40E_SUCCESS) {
10687                 PMD_DRV_LOG(ERR,
10688                         "Failed to get switch_comp bandwidth configuration %u",
10689                         hw->aq.asq_last_status);
10690                 return ret;
10691         }
10692
10693         /* store and print out BW info */
10694         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10695         veb->bw_info.bw_max = ets_query.tc_bw_max;
10696         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10697         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10698         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10699                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10700                      I40E_16_BIT_WIDTH);
10701         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10702                 veb->bw_info.bw_ets_share_credits[i] =
10703                                 bw_query.tc_bw_share_credits[i];
10704                 veb->bw_info.bw_ets_credits[i] =
10705                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10706                 /* 4 bits per TC, 4th bit is reserved */
10707                 veb->bw_info.bw_ets_max[i] =
10708                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10709                                   RTE_LEN2MASK(3, uint8_t));
10710                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10711                             veb->bw_info.bw_ets_share_credits[i]);
10712                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10713                             veb->bw_info.bw_ets_credits[i]);
10714                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10715                             veb->bw_info.bw_ets_max[i]);
10716         }
10717
10718         veb->enabled_tc = tc_map;
10719
10720         return ret;
10721 }
10722
10723
10724 /*
10725  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10726  * @vsi: VSI to be configured
10727  * @tc_map: enabled TC bitmap
10728  *
10729  * Returns 0 on success, negative value on failure
10730  */
10731 static enum i40e_status_code
10732 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10733 {
10734         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10735         struct i40e_vsi_context ctxt;
10736         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10737         enum i40e_status_code ret = I40E_SUCCESS;
10738         int i;
10739
10740         /* Nothing to do if the requested TC map matches the enabled one */
10741         if (vsi->enabled_tc == tc_map)
10742                 return ret;
10743
10744         /* configure tc bandwidth */
10745         memset(&bw_data, 0, sizeof(bw_data));
10746         bw_data.tc_valid_bits = tc_map;
10747         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10748         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10749                 if (tc_map & BIT_ULL(i))
10750                         bw_data.tc_bw_credits[i] = 1;
10751         }
10752         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10753         if (ret) {
10754                 PMD_INIT_LOG(ERR,
10755                         "AQ command Config VSI BW allocation per TC failed = %d",
10756                         hw->aq.asq_last_status);
10757                 goto out;
10758         }
10759         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10760                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10761
10762         /* Update Queue Pairs Mapping for currently enabled UPs */
10763         ctxt.seid = vsi->seid;
10764         ctxt.pf_num = hw->pf_id;
10765         ctxt.vf_num = 0;
10766         ctxt.uplink_seid = vsi->uplink_seid;
10767         ctxt.info = vsi->info;
10768         i40e_get_cap(hw);
10769         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10770         if (ret)
10771                 goto out;
10772
10773         /* Update the VSI after updating the VSI queue-mapping information */
10774         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10775         if (ret) {
10776                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10777                         hw->aq.asq_last_status);
10778                 goto out;
10779         }
10780         /* update the local VSI info with updated queue map */
10781         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10782                                         sizeof(vsi->info.tc_mapping));
10783         rte_memcpy(&vsi->info.queue_mapping,
10784                         &ctxt.info.queue_mapping,
10785                 sizeof(vsi->info.queue_mapping));
10786         vsi->info.mapping_flags = ctxt.info.mapping_flags;
10787         vsi->info.valid_sections = 0;
10788
10789         /* query and update current VSI BW information */
10790         ret = i40e_vsi_get_bw_config(vsi);
10791         if (ret) {
10792                 PMD_INIT_LOG(ERR,
10793                          "Failed updating vsi bw info, err %s aq_err %s",
10794                          i40e_stat_str(hw, ret),
10795                          i40e_aq_str(hw, hw->aq.asq_last_status));
10796                 goto out;
10797         }
10798
10799         vsi->enabled_tc = tc_map;
10800
10801 out:
10802         return ret;
10803 }
10804
10805 /*
10806  * i40e_dcb_hw_configure - program the dcb setting to hw
10807  * @pf: pf the configuration is taken on
10808  * @new_cfg: new configuration
10809  * @tc_map: enabled TC bitmap
10810  *
10811  * Returns 0 on success, negative value on failure
10812  */
10813 static enum i40e_status_code
10814 i40e_dcb_hw_configure(struct i40e_pf *pf,
10815                       struct i40e_dcbx_config *new_cfg,
10816                       uint8_t tc_map)
10817 {
10818         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10819         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
10820         struct i40e_vsi *main_vsi = pf->main_vsi;
10821         struct i40e_vsi_list *vsi_list;
10822         enum i40e_status_code ret;
10823         int i;
10824         uint32_t val;
10825
10826         /* Use the FW API only if FW >= v4.4 */
10827         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
10828               (hw->aq.fw_maj_ver >= 5))) {
10829                 PMD_INIT_LOG(ERR,
10830                         "FW < v4.4, cannot use FW LLDP API to configure DCB");
10831                 return I40E_ERR_FIRMWARE_API_VERSION;
10832         }
10833
10834         /* Check whether reconfiguration is needed */
10835         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
10836                 PMD_INIT_LOG(ERR, "No change in DCB config required.");
10837                 return I40E_SUCCESS;
10838         }
10839
10840         /* Copy the new config to the current config */
10841         *old_cfg = *new_cfg;
10842         old_cfg->etsrec = old_cfg->etscfg;
10843         ret = i40e_set_dcb_config(hw);
10844         if (ret) {
10845                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
10846                          i40e_stat_str(hw, ret),
10847                          i40e_aq_str(hw, hw->aq.asq_last_status));
10848                 return ret;
10849         }
10850         /* Set the Rx arbiter to RR mode and the ETS scheme by default */
10851         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
10852                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
10853                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
10854                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
10855                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
10856                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
10857                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
10858                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
10859                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
10860                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
10861                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
10862                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
10863                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
10864         }
10865         /* Get the local MIB back to verify the configuration took
10866          * effect; IEEE mode. */
10867         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
10868         /* Get Local DCB Config */
10869         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
10870                                      &hw->local_dcbx_config);
10871
10872         /* If a VEB has been created, its TC setting must be updated first */
10873         if (main_vsi->veb) {
10874                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
10875                 if (ret)
10876                         PMD_INIT_LOG(WARNING,
10877                                  "Failed configuring TC for VEB seid=%d",
10878                                  main_vsi->veb->seid);
10879         }
10880         /* Update each VSI */
10881         i40e_vsi_config_tc(main_vsi, tc_map);
10882         if (main_vsi->veb) {
10883                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
10884                         /* Besides the main VSI and VMDq VSIs, only the
10885                          * default TC is enabled for other VSIs
10886                          */
10887                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
10888                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10889                                                          tc_map);
10890                         else
10891                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10892                                                          I40E_DEFAULT_TCMAP);
10893                         if (ret)
10894                                 PMD_INIT_LOG(WARNING,
10895                                         "Failed configuring TC for VSI seid=%d",
10896                                         vsi_list->vsi->seid);
10897                         /* continue */
10898                 }
10899         }
10900         return I40E_SUCCESS;
10901 }
10902
10903 /*
10904  * i40e_dcb_init_configure - initial dcb config
10905  * @dev: device being configured
10906  * @sw_dcb: indicate whether dcb is sw configured or hw offload
10907  *
10908  * Returns 0 on success, negative value on failure
10909  */
10910 int
10911 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
10912 {
10913         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10914         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10915         int i, ret = 0;
10916
10917         if ((pf->flags & I40E_FLAG_DCB) == 0) {
10918                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10919                 return -ENOTSUP;
10920         }
10921
10922         /* DCB initialization:
10923          * Update DCB configuration from the Firmware and configure
10924          * LLDP MIB change event.
10925          */
10926         if (sw_dcb == TRUE) {
10927                 ret = i40e_init_dcb(hw);
10928                 /* If the LLDP agent is stopped, i40e_init_dcb() is
10929                  * expected to fail with an I40E_AQ_RC_EPERM adminq
10930                  * status; otherwise it should succeed.
10931                  */
10932                 if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
10933                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
10934                         memset(&hw->local_dcbx_config, 0,
10935                                 sizeof(struct i40e_dcbx_config));
10936                         /* set dcb default configuration */
10937                         hw->local_dcbx_config.etscfg.willing = 0;
10938                         hw->local_dcbx_config.etscfg.maxtcs = 0;
10939                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
10940                         hw->local_dcbx_config.etscfg.tsatable[0] =
10941                                                 I40E_IEEE_TSA_ETS;
10942                         /* all UPs mapping to TC0 */
10943                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10944                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
10945                         hw->local_dcbx_config.etsrec =
10946                                 hw->local_dcbx_config.etscfg;
10947                         hw->local_dcbx_config.pfc.willing = 0;
10948                         hw->local_dcbx_config.pfc.pfccap =
10949                                                 I40E_MAX_TRAFFIC_CLASS;
10950                         /* FW needs one App to configure HW */
10951                         hw->local_dcbx_config.numapps = 1;
10952                         hw->local_dcbx_config.app[0].selector =
10953                                                 I40E_APP_SEL_ETHTYPE;
10954                         hw->local_dcbx_config.app[0].priority = 3;
10955                         hw->local_dcbx_config.app[0].protocolid =
10956                                                 I40E_APP_PROTOID_FCOE;
10957                         ret = i40e_set_dcb_config(hw);
10958                         if (ret) {
10959                                 PMD_INIT_LOG(ERR,
10960                                         "default dcb config fails. err = %d, aq_err = %d.",
10961                                         ret, hw->aq.asq_last_status);
10962                                 return -ENOSYS;
10963                         }
10964                 } else {
10965                         PMD_INIT_LOG(ERR,
10966                                 "DCB initialization in FW fails, err = %d, aq_err = %d.",
10967                                 ret, hw->aq.asq_last_status);
10968                         return -ENOTSUP;
10969                 }
10970         } else {
10971                 ret = i40e_aq_start_lldp(hw, NULL);
10972                 if (ret != I40E_SUCCESS)
10973                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
10974
10975                 ret = i40e_init_dcb(hw);
10976                 if (!ret) {
10977                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
10978                                 PMD_INIT_LOG(ERR,
10979                                         "HW doesn't support DCBX offload.");
10980                                 return -ENOTSUP;
10981                         }
10982                 } else {
10983                         PMD_INIT_LOG(ERR,
10984                                 "DCBX configuration failed, err = %d, aq_err = %d.",
10985                                 ret, hw->aq.asq_last_status);
10986                         return -ENOTSUP;
10987                 }
10988         }
10989         return 0;
10990 }
10991
10992 /*
10993  * i40e_dcb_setup - setup dcb related config
10994  * @dev: device being configured
10995  *
10996  * Returns 0 on success, negative value on failure
10997  */
10998 static int
10999 i40e_dcb_setup(struct rte_eth_dev *dev)
11000 {
11001         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11002         struct i40e_dcbx_config dcb_cfg;
11003         uint8_t tc_map = 0;
11004         int ret = 0;
11005
11006         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11007                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11008                 return -ENOTSUP;
11009         }
11010
11011         if (pf->vf_num != 0)
11012                 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDq VSIs.");
11013
11014         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11015         if (ret) {
11016                 PMD_INIT_LOG(ERR, "invalid dcb config");
11017                 return -EINVAL;
11018         }
11019         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11020         if (ret) {
11021                 PMD_INIT_LOG(ERR, "dcb sw configure fails");
11022                 return -ENOSYS;
11023         }
11024
11025         return 0;
11026 }
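
/*
 * What feeds i40e_parse_dcb_configure(): the application requests DCB via
 * the generic ethdev configuration. A minimal sketch, assuming a port
 * (port_id) set up with four queues and four TCs:
 *
 *     struct rte_eth_conf conf = { .rxmode = { .mq_mode = ETH_MQ_RX_DCB } };
 *     int up;
 *
 *     conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
 *     for (up = 0; up < ETH_DCB_NUM_USER_PRIORITIES; up++)
 *             conf.rx_adv_conf.dcb_rx_conf.dcb_tc[up] = up % 4;
 *     rte_eth_dev_configure(port_id, 4, 4, &conf);
 */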
11027
11028 static int
11029 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11030                       struct rte_eth_dcb_info *dcb_info)
11031 {
11032         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11033         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11034         struct i40e_vsi *vsi = pf->main_vsi;
11035         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11036         uint16_t bsf, tc_mapping;
11037         int i, j = 0;
11038
11039         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11040                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11041         else
11042                 dcb_info->nb_tcs = 1;
11043         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11044                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11045         for (i = 0; i < dcb_info->nb_tcs; i++)
11046                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11047
11048         /* get queue mapping if vmdq is disabled */
11049         if (!pf->nb_cfg_vmdq_vsi) {
11050                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11051                         if (!(vsi->enabled_tc & (1 << i)))
11052                                 continue;
11053                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11054                         dcb_info->tc_queue.tc_rxq[j][i].base =
11055                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11056                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11057                         dcb_info->tc_queue.tc_txq[j][i].base =
11058                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11059                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11060                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11061                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11062                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11063                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11064                 }
11065                 return 0;
11066         }
11067
11068         /* get queue mapping if vmdq is enabled */
11069         do {
11070                 vsi = pf->vmdq[j].vsi;
11071                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11072                         if (!(vsi->enabled_tc & (1 << i)))
11073                                 continue;
11074                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11075                         dcb_info->tc_queue.tc_rxq[j][i].base =
11076                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11077                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11078                         dcb_info->tc_queue.tc_txq[j][i].base =
11079                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11080                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11081                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11082                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11083                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11084                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11085                 }
11086                 j++;
11087         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11088         return 0;
11089 }
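
/*
 * Retrieval side of the mapping above, sketched from the application via
 * the generic wrapper that lands in this callback (assumes a valid
 * port_id):
 *
 *     struct rte_eth_dcb_info info;
 *     int tc;
 *
 *     if (rte_eth_dev_get_dcb_info(port_id, &info) == 0)
 *             for (tc = 0; tc < info.nb_tcs; tc++)
 *                     printf("TC%d: bw %u%%, rxq base %u, %u queue(s)\n",
 *                            tc, info.tc_bws[tc],
 *                            info.tc_queue.tc_rxq[0][tc].base,
 *                            info.tc_queue.tc_rxq[0][tc].nb_queue);
 */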
11090
11091 static int
11092 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11093 {
11094         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11095         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11096         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11097         uint16_t interval =
11098                 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1);
11099         uint16_t msix_intr;
11100
11101         msix_intr = intr_handle->intr_vec[queue_id];
11102         if (msix_intr == I40E_MISC_VEC_ID)
11103                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11104                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
11105                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11106                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
11107                                (interval <<
11108                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
11109         else
11110                 I40E_WRITE_REG(hw,
11111                                I40E_PFINT_DYN_CTLN(msix_intr -
11112                                                    I40E_RX_VEC_START),
11113                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
11114                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11115                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
11116                                (interval <<
11117                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
11118
11119         I40E_WRITE_FLUSH(hw);
11120         rte_intr_enable(&pci_dev->intr_handle);
11121
11122         return 0;
11123 }
11124
11125 static int
11126 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11127 {
11128         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11129         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11130         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11131         uint16_t msix_intr;
11132
11133         msix_intr = intr_handle->intr_vec[queue_id];
11134         if (msix_intr == I40E_MISC_VEC_ID)
11135                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
11136         else
11137                 I40E_WRITE_REG(hw,
11138                                I40E_PFINT_DYN_CTLN(msix_intr -
11139                                                    I40E_RX_VEC_START),
11140                                0);
11141         I40E_WRITE_FLUSH(hw);
11142
11143         return 0;
11144 }
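
/*
 * These two callbacks back rte_eth_dev_rx_intr_enable()/_disable(). The
 * usual interrupt-mode polling loop (a sketch with the epoll wiring and
 * buffer declarations omitted) re-arms the vector before sleeping and
 * masks it again before returning to busy polling:
 *
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *     ... block until the queue's event fd fires ...
 *     rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *     nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
 */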
11145
11146 static int i40e_get_regs(struct rte_eth_dev *dev,
11147                          struct rte_dev_reg_info *regs)
11148 {
11149         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11150         uint32_t *ptr_data = regs->data;
11151         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11152         const struct i40e_reg_info *reg_info;
11153
11154         if (ptr_data == NULL) {
11155                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11156                 regs->width = sizeof(uint32_t);
11157                 return 0;
11158         }
11159
11160         /* The first few registers have to be read using AQ operations */
11161         reg_idx = 0;
11162         while (i40e_regs_adminq[reg_idx].name) {
11163                 reg_info = &i40e_regs_adminq[reg_idx++];
11164                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11165                         for (arr_idx2 = 0;
11166                                         arr_idx2 <= reg_info->count2;
11167                                         arr_idx2++) {
11168                                 reg_offset = arr_idx * reg_info->stride1 +
11169                                         arr_idx2 * reg_info->stride2;
11170                                 reg_offset += reg_info->base_addr;
11171                                 ptr_data[reg_offset >> 2] =
11172                                         i40e_read_rx_ctl(hw, reg_offset);
11173                         }
11174         }
11175
11176         /* The remaining registers can be read using primitives */
11177         reg_idx = 0;
11178         while (i40e_regs_others[reg_idx].name) {
11179                 reg_info = &i40e_regs_others[reg_idx++];
11180                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11181                         for (arr_idx2 = 0;
11182                                         arr_idx2 <= reg_info->count2;
11183                                         arr_idx2++) {
11184                                 reg_offset = arr_idx * reg_info->stride1 +
11185                                         arr_idx2 * reg_info->stride2;
11186                                 reg_offset += reg_info->base_addr;
11187                                 ptr_data[reg_offset >> 2] =
11188                                         I40E_READ_REG(hw, reg_offset);
11189                         }
11190         }
11191
11192         return 0;
11193 }
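
/*
 * The NULL-data probe above enables the usual two-call pattern: the first
 * call fills in length and width, the second dumps the registers. A
 * caller-side sketch, with error checks omitted:
 *
 *     struct rte_dev_reg_info reg = { .data = NULL };
 *
 *     rte_eth_dev_get_reg_info(port_id, &reg);
 *     reg.data = malloc(reg.length * reg.width);
 *     rte_eth_dev_get_reg_info(port_id, &reg);
 */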
11194
11195 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11196 {
11197         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11198
11199         /* Convert word count to byte count */
11200         return hw->nvm.sr_size << 1;
11201 }
11202
11203 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11204                            struct rte_dev_eeprom_info *eeprom)
11205 {
11206         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11207         uint16_t *data = eeprom->data;
11208         uint16_t offset, length, cnt_words;
11209         int ret_code;
11210
11211         offset = eeprom->offset >> 1;
11212         length = eeprom->length >> 1;
11213         cnt_words = length;
11214
11215         if (offset > hw->nvm.sr_size ||
11216                 offset + length > hw->nvm.sr_size) {
11217                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11218                 return -EINVAL;
11219         }
11220
11221         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11222
11223         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11224         if (ret_code != I40E_SUCCESS || cnt_words != length) {
11225                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
11226                 return -EIO;
11227         }
11228
11229         return 0;
11230 }
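
/*
 * Caller-side sketch for the EEPROM accessors: offset and length are in
 * bytes (the driver converts to NVM words internally), and magic is
 * filled in by the driver on success:
 *
 *     int len = rte_eth_dev_get_eeprom_length(port_id);
 *     struct rte_dev_eeprom_info ee = { .offset = 0, .length = len };
 *
 *     ee.data = malloc(len);
 *     if (len > 0 && rte_eth_dev_get_eeprom(port_id, &ee) == 0)
 *             ... ee.data now holds the shadow RAM contents ...
 */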
11231
11232 static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11233                                       struct ether_addr *mac_addr)
11234 {
11235         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11236         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11237         struct i40e_vsi *vsi = pf->main_vsi;
11238         struct i40e_mac_filter_info mac_filter;
11239         struct i40e_mac_filter *f;
11240         int ret;
11241
11242         if (!is_valid_assigned_ether_addr(mac_addr)) {
11243                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11244                 return;
11245         }
11246
11247         TAILQ_FOREACH(f, &vsi->mac_list, next) {
11248                 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
11249                         break;
11250         }
11251
11252         if (f == NULL) {
11253                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11254                 return;
11255         }
11256
11257         mac_filter = f->mac_info;
11258         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11259         if (ret != I40E_SUCCESS) {
11260                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11261                 return;
11262         }
11263         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11264         ret = i40e_vsi_add_mac(vsi, &mac_filter);
11265         if (ret != I40E_SUCCESS) {
11266                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
11267                 return;
11268         }
11269         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11270
11271         i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11272                                   mac_addr->addr_bytes, NULL);
11273 }
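
/*
 * Exposed to applications as rte_eth_dev_default_mac_addr_set(). A sketch
 * of a caller swapping in a locally administered address (the 0x02 bit in
 * the first byte marks it as such):
 *
 *     struct ether_addr laa = {
 *             .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *     };
 *
 *     rte_eth_dev_default_mac_addr_set(port_id, &laa);
 */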
11274
11275 static int
11276 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11277 {
11278         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11279         struct rte_eth_dev_data *dev_data = pf->dev_data;
11280         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11281         int ret = 0;
11282
11283         /* check if mtu is within the allowed range */
11284         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
11285                 return -EINVAL;
11286
11287         /* MTU cannot be changed while the port is started */
11288         if (dev_data->dev_started) {
11289                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11290                             dev_data->port_id);
11291                 return -EBUSY;
11292         }
11293
11294         if (frame_size > ETHER_MAX_LEN)
11295                 dev_data->dev_conf.rxmode.jumbo_frame = 1;
11296         else
11297                 dev_data->dev_conf.rxmode.jumbo_frame = 0;
11298
11299         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11300
11301         return ret;
11302 }
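
/*
 * Caller-side sketch: the MTU can only be changed while the port is
 * stopped, and frame_size = mtu + I40E_ETH_OVERHEAD decides whether the
 * jumbo frame flag is set:
 *
 *     rte_eth_dev_stop(port_id);
 *     if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *             rte_exit(EXIT_FAILURE, "cannot set MTU\n");
 *     rte_eth_dev_start(port_id);
 */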
11303
11304 /* Restore ethertype filter */
11305 static void
11306 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11307 {
11308         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11309         struct i40e_ethertype_filter_list
11310                 *ethertype_list = &pf->ethertype.ethertype_list;
11311         struct i40e_ethertype_filter *f;
11312         struct i40e_control_filter_stats stats = { 0 };
11313         uint16_t flags;
11314
11315         TAILQ_FOREACH(f, ethertype_list, rules) {
11316                 flags = 0;
11317                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11318                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11319                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11320                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11321                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11322
11323                 memset(&stats, 0, sizeof(stats));
11324                 i40e_aq_add_rem_control_packet_filter(hw,
11325                                             f->input.mac_addr.addr_bytes,
11326                                             f->input.ether_type,
11327                                             flags, pf->main_vsi->seid,
11328                                             f->queue, 1, &stats, NULL);
11329         }
11330         PMD_DRV_LOG(INFO, "Ethertype filter:"
11331                     " mac_etype_used = %u, etype_used = %u,"
11332                     " mac_etype_free = %u, etype_free = %u",
11333                     stats.mac_etype_used, stats.etype_used,
11334                     stats.mac_etype_free, stats.etype_free);
11335 }
11336
11337 /* Restore tunnel filter */
11338 static void
11339 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11340 {
11341         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11342         struct i40e_vsi *vsi;
11343         struct i40e_pf_vf *vf;
11344         struct i40e_tunnel_filter_list
11345                 *tunnel_list = &pf->tunnel.tunnel_list;
11346         struct i40e_tunnel_filter *f;
11347         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
11348         bool big_buffer = 0;
11349
11350         TAILQ_FOREACH(f, tunnel_list, rules) {
11351                 if (!f->is_to_vf)
11352                         vsi = pf->main_vsi;
11353                 else {
11354                         vf = &pf->vfs[f->vf_id];
11355                         vsi = vf->vsi;
11356                 }
11357                 memset(&cld_filter, 0, sizeof(cld_filter));
11358                 ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
11359                         (struct ether_addr *)&cld_filter.element.outer_mac);
11360                 ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
11361                         (struct ether_addr *)&cld_filter.element.inner_mac);
11362                 cld_filter.element.inner_vlan = f->input.inner_vlan;
11363                 cld_filter.element.flags = f->input.flags;
11364                 cld_filter.element.tenant_id = f->input.tenant_id;
11365                 cld_filter.element.queue_number = f->queue;
11366                 rte_memcpy(cld_filter.general_fields,
11367                            f->input.general_fields,
11368                            sizeof(f->input.general_fields));
11369
11370                 if (((f->input.flags &
11371                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11372                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11373                     ((f->input.flags &
11374                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11375                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11376                     ((f->input.flags &
11377                      I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11378                      I40E_AQC_ADD_CLOUD_FILTER_0X10))
11379                         big_buffer = 1;
11380
11381                 if (big_buffer)
11382                         i40e_aq_add_cloud_filters_big_buffer(hw,
11383                                              vsi->seid, &cld_filter, 1);
11384                 else
11385                         i40e_aq_add_cloud_filters(hw, vsi->seid,
11386                                                   &cld_filter.element, 1);
11387         }
11388 }
11389
11390 /* Restore rss filter */
11391 static inline void
11392 i40e_rss_filter_restore(struct i40e_pf *pf)
11393 {
11394         struct i40e_rte_flow_rss_conf *conf = &pf->rss_info;
11395
11396         if (conf->num)
11397                 i40e_config_rss_filter(pf, conf, TRUE);
11398 }
11399
11400 static void
11401 i40e_filter_restore(struct i40e_pf *pf)
11402 {
11403         i40e_ethertype_filter_restore(pf);
11404         i40e_tunnel_filter_restore(pf);
11405         i40e_fdir_filter_restore(pf);
11406         i40e_rss_filter_restore(pf);
11407 }
11408
11409 static bool
11410 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11411 {
11412         if (strcmp(dev->device->driver->name, drv->driver.name))
11413                 return false;
11414
11415         return true;
11416 }
11417
11418 bool
11419 is_i40e_supported(struct rte_eth_dev *dev)
11420 {
11421         return is_device_supported(dev, &rte_i40e_pmd);
11422 }
11423
11424 struct i40e_customized_pctype*
11425 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11426 {
11427         int i;
11428
11429         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11430                 if (pf->customized_pctype[i].index == index)
11431                         return &pf->customized_pctype[i];
11432         }
11433         return NULL;
11434 }
11435
11436 static int
11437 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11438                               uint32_t pkg_size, uint32_t proto_num,
11439                               struct rte_pmd_i40e_proto_info *proto)
11440 {
11441         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11442         uint32_t pctype_num;
11443         struct rte_pmd_i40e_ptype_info *pctype;
11444         uint32_t buff_size;
11445         struct i40e_customized_pctype *new_pctype = NULL;
11446         uint8_t proto_id;
11447         uint8_t pctype_value;
11448         char name[64];
11449         uint32_t i, j, n;
11450         int ret;
11451
11452         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11453                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
11454                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11455         if (ret) {
11456                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
11457                 return -1;
11458         }
11459         if (!pctype_num) {
11460                 PMD_DRV_LOG(INFO, "No new pctype added");
11461                 return -1;
11462         }
11463
11464         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
11465         pctype = rte_zmalloc("new_pctype", buff_size, 0);
11466         if (!pctype) {
11467                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11468                 return -1;
11469         }
11470         /* get information about new pctype list */
11471         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11472                                         (uint8_t *)pctype, buff_size,
11473                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11474         if (ret) {
11475                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
11476                 rte_free(pctype);
11477                 return -1;
11478         }
11479
11480         /* Update customized pctype. */
11481         for (i = 0; i < pctype_num; i++) {
11482                 pctype_value = pctype[i].ptype_id;
11483                 memset(name, 0, sizeof(name));
11484                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11485                         proto_id = pctype[i].protocols[j];
11486                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11487                                 continue;
11488                         for (n = 0; n < proto_num; n++) {
11489                                 if (proto[n].proto_id != proto_id)
11490                                         continue;
11491                                 strcat(name, proto[n].name);
11492                                 strcat(name, "_");
11493                                 break;
11494                         }
11495                 }
11496                 if (name[0]) name[strlen(name) - 1] = '\0'; /* strip trailing '_' */
11497                 if (!strcmp(name, "GTPC"))
11498                         new_pctype =
11499                                 i40e_find_customized_pctype(pf,
11500                                                       I40E_CUSTOMIZED_GTPC);
11501                 else if (!strcmp(name, "GTPU_IPV4"))
11502                         new_pctype =
11503                                 i40e_find_customized_pctype(pf,
11504                                                    I40E_CUSTOMIZED_GTPU_IPV4);
11505                 else if (!strcmp(name, "GTPU_IPV6"))
11506                         new_pctype =
11507                                 i40e_find_customized_pctype(pf,
11508                                                    I40E_CUSTOMIZED_GTPU_IPV6);
11509                 else if (!strcmp(name, "GTPU"))
11510                         new_pctype =
11511                                 i40e_find_customized_pctype(pf,
11512                                                       I40E_CUSTOMIZED_GTPU);
11513                 if (new_pctype) {
11514                         new_pctype->pctype = pctype_value;
11515                         new_pctype->valid = true;
11516                 }
11517         }
11518
11519         rte_free(pctype);
11520         return 0;
11521 }
11522
11523 static int
11524 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
11525                                uint32_t pkg_size, uint32_t proto_num,
11526                                struct rte_pmd_i40e_proto_info *proto)
11527 {
11528         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
11529         uint16_t port_id = dev->data->port_id;
11530         uint32_t ptype_num;
11531         struct rte_pmd_i40e_ptype_info *ptype;
11532         uint32_t buff_size;
11533         uint8_t proto_id;
11534         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
11535         uint32_t i, j, n;
11536         bool in_tunnel;
11537         int ret;
11538
11539         /* get information about new ptype num */
11540         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11541                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
11542                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
11543         if (ret) {
11544                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
11545                 return ret;
11546         }
11547         if (!ptype_num) {
11548                 PMD_DRV_LOG(INFO, "No new ptype added");
11549                 return -1;
11550         }
11551
11552         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11553         ptype = rte_zmalloc("new_ptype", buff_size, 0);
11554         if (!ptype) {
11555                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11556                 return -1;
11557         }
11558
11559         /* get information about new ptype list */
11560         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11561                                         (uint8_t *)ptype, buff_size,
11562                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
11563         if (ret) {
11564                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
11565                 rte_free(ptype);
11566                 return ret;
11567         }
11568
11569         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
11570         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
11571         if (!ptype_mapping) {
11572                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11573                 rte_free(ptype);
11574                 return -1;
11575         }
11576
11577         /* Update ptype mapping table. */
11578         for (i = 0; i < ptype_num; i++) {
11579                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
11580                 ptype_mapping[i].sw_ptype = 0;
11581                 in_tunnel = false;
11582                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11583                         proto_id = ptype[i].protocols[j];
11584                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11585                                 continue;
11586                         for (n = 0; n < proto_num; n++) {
11587                                 if (proto[n].proto_id != proto_id)
11588                                         continue;
11589                                 memset(name, 0, sizeof(name));
11590                                 strcpy(name, proto[n].name);
                                if (!strncasecmp(name, "PPPOE", 5))
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L2_ETHER_PPPOE;
                                else if (!strncasecmp(name, "IPV4FRAG", 8) &&
                                         !in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_FRAG;
                                } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
                                           in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_FRAG;
                                } else if (!strncasecmp(name, "OIPV4", 5)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "IPV4", 4) &&
                                           !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                                else if (!strncasecmp(name, "IPV4", 4) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
                                else if (!strncasecmp(name, "IPV6FRAG", 8) &&
                                         !in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_FRAG;
                                } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
                                           in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_FRAG;
                                } else if (!strncasecmp(name, "OIPV6", 5)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "IPV6", 4) &&
                                           !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
                                else if (!strncasecmp(name, "IPV6", 4) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
                                else if (!strncasecmp(name, "UDP", 3) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_UDP;
                                else if (!strncasecmp(name, "UDP", 3) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_UDP;
                                else if (!strncasecmp(name, "TCP", 3) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_TCP;
                                else if (!strncasecmp(name, "TCP", 3) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_TCP;
                                else if (!strncasecmp(name, "SCTP", 4) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_SCTP;
                                else if (!strncasecmp(name, "SCTP", 4) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_SCTP;
                                else if ((!strncasecmp(name, "ICMP", 4) ||
                                          !strncasecmp(name, "ICMPV6", 6)) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_ICMP;
                                else if ((!strncasecmp(name, "ICMP", 4) ||
                                          !strncasecmp(name, "ICMPV6", 6)) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_ICMP;
                                else if (!strncasecmp(name, "GTPC", 4)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_GTPC;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "GTPU", 4)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_GTPU;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "GRENAT", 6)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_GRENAT;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "L2TPV2CTL", 9)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_L2TP;
                                        in_tunnel = true;
                                }

                                break;
                        }
                }
        }

        ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
                                                ptype_num, 0);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to update mapping table.");

        rte_free(ptype_mapping);
        rte_free(ptype);
        return ret;
}
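
/*
 * Example (illustrative sketch, not part of the driver): after a DDP
 * profile updates the table above, an application can read the mapping
 * back through the rte_pmd_i40e API.  The port id 0 and the table size
 * of 256 below are placeholder assumptions for the sketch.
 *
 *      #include <rte_pmd_i40e.h>
 *
 *      struct rte_pmd_i40e_ptype_mapping map[256];
 *      uint16_t count = 0;
 *
 *      (Fetch only entries that carry a valid software ptype.)
 *      if (!rte_pmd_i40e_ptype_mapping_get(0, map, RTE_DIM(map),
 *                                          &count, 1))
 *              while (count--)
 *                      printf("hw 0x%x -> sw 0x%x\n",
 *                             map[count].hw_ptype, map[count].sw_ptype);
 */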

void
i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
                              uint32_t pkg_size)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        uint32_t proto_num;
        struct rte_pmd_i40e_proto_info *proto;
        uint32_t buff_size;
        uint32_t i;
        int ret;

        /* get information about protocol number */
        ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
                                       (uint8_t *)&proto_num, sizeof(proto_num),
                                       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get protocol number");
                return;
        }
        if (!proto_num) {
                PMD_DRV_LOG(INFO, "No new protocol added");
                return;
        }

        buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
        proto = rte_zmalloc("new_proto", buff_size, 0);
        if (!proto) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
                return;
        }

        /* get information about protocol list */
        ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
                                        (uint8_t *)proto, buff_size,
                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get protocol list");
                rte_free(proto);
                return;
        }

        /* Check if GTP is supported. */
        for (i = 0; i < proto_num; i++) {
                if (!strncmp(proto[i].name, "GTP", 3)) {
                        pf->gtp_support = true;
                        break;
                }
        }

        /* Update customized pctype info */
        ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
                                            proto_num, proto);
        if (ret)
                PMD_DRV_LOG(INFO, "No pctype is updated.");

        /* Update customized ptype info */
        ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
                                           proto_num, proto);
        if (ret)
                PMD_DRV_LOG(INFO, "No ptype is updated.");

        rte_free(proto);
}
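
/*
 * Example (illustrative sketch, not part of the driver): this routine
 * runs after an application writes a DDP profile to the NIC.  A typical
 * trigger, assuming the profile file has already been read into
 * buf/size (both hypothetical names here):
 *
 *      #include <rte_pmd_i40e.h>
 *
 *      int rc = rte_pmd_i40e_process_ddp_package(port_id, buf, size,
 *                              RTE_PMD_I40E_PKG_OP_WR_ADD);
 *      if (rc)
 *              printf("DDP profile download failed: %d\n", rc);
 *
 * On a successful write the PMD invokes i40e_update_customized_info()
 * so new protocols (e.g. GTP) become visible to pctype/ptype handling.
 */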

/* Create a QinQ cloud filter
 *
 * The Fortville NIC has limited resources for tunnel filters,
 * so we can only reuse existing filters.
 *
 * In step 1 we define which Field Vector fields can be used for
 * filter types.
 * As we do not have the inner tag defined as a field,
 * we have to define it first, by reusing one of the L1 entries.
 *
 * In step 2 we replace one of the existing filter types with
 * a new one for QinQ.
 * As we are reusing an L1 entry and replacing an L2 entry, some of
 * the default filter types will disappear, depending on which L1
 * and L2 entries we reuse.
 *
 * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
 *
 * 1.   Create L1 filter of outer vlan (12b) which will be in use
 *              later when we define the cloud filter.
 *      a.      Valid_flags.replace_cloud = 0
 *      b.      Old_filter = 10 (Stag_Inner_Vlan)
 *      c.      New_filter = 0x10
 *      d.      TR bit = 0xff (optional, not used here)
 *      e.      Buffer - 2 entries:
 *              i.      Byte 0 = 8 (outer vlan FV index).
 *                      Byte 1 = 0 (rsv)
 *                      Byte 2-3 = 0x0fff
 *              ii.     Byte 0 = 37 (inner vlan FV index).
 *                      Byte 1 = 0 (rsv)
 *                      Byte 2-3 = 0x0fff
 *
 * Step 2:
 * 2.   Create cloud filter using two L1 filter entries: stag and
 *              new filter (outer vlan + inner vlan)
 *      a.      Valid_flags.replace_cloud = 1
 *      b.      Old_filter = 1 (instead of outer IP)
 *      c.      New_filter = 0x10
 *      d.      Buffer - 2 entries:
 *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
 *                      Byte 1-3 = 0 (rsv)
 *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
 *                      Byte 9-11 = 0 (rsv)
 */
static int
i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
{
        int ret = -ENOTSUP;
        struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
        struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);

        if (pf->support_multi_driver) {
                PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
                return ret;
        }

        /* Init */
        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

        /* create L1 filter */
        filter_replace.old_filter_type =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
        filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
        filter_replace.tr_bit = 0;

        /* Prepare the buffer, 2 entries */
        filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        /* Field Vector 12b mask */
        filter_replace_buf.data[2] = 0xff;
        filter_replace_buf.data[3] = 0x0f;
        filter_replace_buf.data[4] =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        /* Field Vector 12b mask */
        filter_replace_buf.data[6] = 0xff;
        filter_replace_buf.data[7] = 0x0f;
        ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                        &filter_replace_buf);
        if (ret != I40E_SUCCESS)
                return ret;
        PMD_DRV_LOG(DEBUG, "Global configuration modification: "
                    "cloud l1 type is changed from 0x%x to 0x%x",
                    filter_replace.old_filter_type,
                    filter_replace.new_filter_type);

        /* Apply the second L2 cloud filter */
        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

        /* create L2 filter, input for L2 filter will be L1 filter */
        filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
        filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
        filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;

        /* Prepare the buffer, 2 entries */
        filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                        &filter_replace_buf);
        if (!ret) {
                i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
                PMD_DRV_LOG(DEBUG, "Global configuration modification: "
                            "cloud filter type is changed from 0x%x to 0x%x",
                            filter_replace.old_filter_type,
                            filter_replace.new_filter_type);
        }
        return ret;
}
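
/*
 * Example (illustrative sketch, not part of the driver): once the
 * replaced cloud filter exists, an rte_flow rule matching an outer plus
 * inner VLAN pair can be offloaded to it.  A hedged application-side
 * sketch; the exact item/action set accepted for QinQ may differ per
 * DPDK version, and the TCI values below are placeholders:
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item_vlan outer = { .tci = rte_cpu_to_be_16(100) };
 *      struct rte_flow_item_vlan inner = { .tci = rte_cpu_to_be_16(200) };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &outer },
 *              { .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &inner },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *
 * paired with a suitable action list and rte_flow_create() on the port.
 */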

int
i40e_config_rss_filter(struct i40e_pf *pf,
                struct i40e_rte_flow_rss_conf *conf, bool add)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t i, lut = 0;
        uint16_t j, num;
        struct rte_eth_rss_conf rss_conf = conf->rss_conf;
        struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;

        if (!add) {
                if (memcmp(conf, rss_info,
                        sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
                        i40e_pf_disable_rss(pf);
                        memset(rss_info, 0,
                                sizeof(struct i40e_rte_flow_rss_conf));
                        return 0;
                }
                return -EINVAL;
        }

        if (rss_info->num)
                return -EINVAL;

        /* If both VMDQ and RSS are enabled, not all of the PF queues are
         * used for RSS. It's necessary to calculate the actual number of
         * PF queues that are configured.
         */
        if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
                num = i40e_pf_calc_configured_queues_num(pf);
        else
                num = pf->dev_data->nb_rx_queues;

        num = RTE_MIN(num, conf->num);
        PMD_DRV_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
                        num);

        if (num == 0) {
                PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
                return -ENOTSUP;
        }

        /* Fill in redirection table */
        for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
                if (j == num)
                        j = 0;
                lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
                        hw->func_caps.rss_table_entry_width) - 1));
                if ((i & 3) == 3)
                        I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
        }
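        /*
         * Worked example (documentation only): with num = 4 and
         * conf->queue = {0, 1, 2, 3}, every four iterations pack four
         * 8-bit LUT entries into one 32-bit register, so HLUT(0) is
         * written as 0x00010203 (the entry for i = 0 lands in the most
         * significant byte), and the pattern repeats across the whole
         * rss_table_size.
         */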

        if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
                i40e_pf_disable_rss(pf);
                return 0;
        }
        if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
                (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
                /* Random default keys */
                static uint32_t rss_key_default[] = {0x6b793944,
                        0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
                        0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
                        0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};

                rss_conf.rss_key = (uint8_t *)rss_key_default;
                rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
                                                        sizeof(uint32_t);
        }

        i40e_hw_rss_hash_set(pf, &rss_conf);

        rte_memcpy(rss_info,
                conf, sizeof(struct i40e_rte_flow_rss_conf));

        return 0;
}
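
/*
 * Example (illustrative, not part of the driver): this routine backs the
 * rte_flow RSS action in this PMD.  From testpmd, a rule of roughly this
 * shape (flow-command syntax varies between DPDK versions) takes the
 * add == true path with a four-queue redirection table:
 *
 *      flow create 0 ingress pattern end \
 *              actions rss queues 0 1 2 3 end / end
 *
 * Destroying the rule re-enters with add == false; a matching config
 * then disables RSS via i40e_pf_disable_rss().
 */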

RTE_INIT(i40e_init_log);
static void
i40e_init_log(void)
{
        i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
        if (i40e_logtype_init >= 0)
                rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
        i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
        if (i40e_logtype_driver >= 0)
                rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
}
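
/*
 * Note (documentation only): both log types default to NOTICE.  More
 * verbose driver output can be requested from the EAL command line,
 * e.g. (the log-level option syntax varies between DPDK versions):
 *
 *      --log-level=pmd.net.i40e.driver,8
 */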

RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
                              QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
                              ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
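
/*
 * Usage note (documentation only): the device arguments registered above
 * are passed per PCI device, e.g. (the PCI address is a placeholder):
 *
 *      -w 0000:81:00.0,support-multi-driver=1
 */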