/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"

#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
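
/*
 * Note: both water marks compute to 0xF2000 >> 10 = 968 KB, the full Rx
 * packet buffer size (I40E_RXPBSIZE above), so the high and low water
 * defaults start out equal; they can be changed at runtime through the
 * .flow_ctrl_set op.
 */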

/* Receive average packet size in bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
                I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_GRST_MASK | \
                I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
                I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
                I40E_PFINT_ICR0_ENA_VFLR_MASK | \
                I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
        (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
        (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
        (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
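
/*
 * I40E_FLOW_TYPES is a bitmask over the RTE_ETH_FLOW_* identifiers, so
 * support for a given flow type can be tested with, e.g.:
 *
 *     if (I40E_FLOW_TYPES & (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP))
 *             ...;
 */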

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
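
/*
 * The INCVAL constants above are per-link-speed increments for the
 * timesync clock; the driver is expected to load the one matching the
 * negotiated link speed when timesync is enabled (see
 * i40e_timesync_enable()).
 */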

/**
 * The values below are for writing non-exposed registers, as suggested
 * by silicon experts.
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* All 8 words of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
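
/*
 * A minimal sketch of how the PCI constants above fit together (the
 * driver's actual routine may differ): read the capability register,
 * test the Extended Tag capability bit, then set the enable bit in the
 * control register.
 *
 *     uint32_t cap, ctrl;
 *
 *     rte_pci_read_config(pci_dev, &cap, sizeof(cap), PCI_DEV_CAP_REG);
 *     if (cap & PCI_DEV_CAP_EXT_TAG_MASK) {
 *             rte_pci_read_config(pci_dev, &ctrl, sizeof(ctrl),
 *                                 PCI_DEV_CTRL_REG);
 *             ctrl |= PCI_DEV_CTRL_EXT_TAG_MASK;
 *             rte_pci_write_config(pci_dev, &ctrl, sizeof(ctrl),
 *                                  PCI_DEV_CTRL_REG);
 *     }
 */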

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static int  i40e_dev_reset(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
                               struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
                                     struct rte_eth_xstat_name *xstats_names,
                                     unsigned limit);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
                                            uint16_t queue_id,
                                            uint8_t stat_idx,
                                            uint8_t is_rx);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
                                char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id,
                                int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
                              enum rte_vlan_type vlan_type,
                              uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
                                      uint16_t queue,
                                      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                                       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
                            struct ether_addr *mac_addr,
                            uint32_t index,
                            uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
                bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
                               uint32_t hireg,
                               uint32_t loreg,
                               bool offset_loaded,
                               uint64_t *offset,
                               uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
                                uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
                        uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
                        uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
                                                struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
                                enum rte_filter_type filter_type,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                                  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
                                                     uint16_t seid,
                                                     uint16_t rule_type,
                                                     uint16_t *entries,
                                                     uint16_t count,
                                                     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp,
                                           uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
                                    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                         uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
                         struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
                           struct rte_dev_eeprom_info *eeprom);

static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
        const struct rte_eth_ethertype_filter *input,
        struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
                                   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
        struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
        struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
                                struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

int i40e_logtype_init;
int i40e_logtype_driver;

static const struct rte_pci_id pci_id_i40e_map[] = {
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
        .dev_configure                = i40e_dev_configure,
        .dev_start                    = i40e_dev_start,
        .dev_stop                     = i40e_dev_stop,
        .dev_close                    = i40e_dev_close,
        .dev_reset                    = i40e_dev_reset,
        .promiscuous_enable           = i40e_dev_promiscuous_enable,
        .promiscuous_disable          = i40e_dev_promiscuous_disable,
        .allmulticast_enable          = i40e_dev_allmulticast_enable,
        .allmulticast_disable         = i40e_dev_allmulticast_disable,
        .dev_set_link_up              = i40e_dev_set_link_up,
        .dev_set_link_down            = i40e_dev_set_link_down,
        .link_update                  = i40e_dev_link_update,
        .stats_get                    = i40e_dev_stats_get,
        .xstats_get                   = i40e_dev_xstats_get,
        .xstats_get_names             = i40e_dev_xstats_get_names,
        .stats_reset                  = i40e_dev_stats_reset,
        .xstats_reset                 = i40e_dev_stats_reset,
        .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
        .fw_version_get               = i40e_fw_version_get,
        .dev_infos_get                = i40e_dev_info_get,
        .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
        .vlan_tpid_set                = i40e_vlan_tpid_set,
        .vlan_offload_set             = i40e_vlan_offload_set,
        .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
        .vlan_pvid_set                = i40e_vlan_pvid_set,
        .rx_queue_start               = i40e_dev_rx_queue_start,
        .rx_queue_stop                = i40e_dev_rx_queue_stop,
        .tx_queue_start               = i40e_dev_tx_queue_start,
        .tx_queue_stop                = i40e_dev_tx_queue_stop,
        .rx_queue_setup               = i40e_dev_rx_queue_setup,
        .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
        .rx_queue_release             = i40e_dev_rx_queue_release,
        .rx_queue_count               = i40e_dev_rx_queue_count,
        .rx_descriptor_done           = i40e_dev_rx_descriptor_done,
        .rx_descriptor_status         = i40e_dev_rx_descriptor_status,
        .tx_descriptor_status         = i40e_dev_tx_descriptor_status,
        .tx_queue_setup               = i40e_dev_tx_queue_setup,
        .tx_queue_release             = i40e_dev_tx_queue_release,
        .dev_led_on                   = i40e_dev_led_on,
        .dev_led_off                  = i40e_dev_led_off,
        .flow_ctrl_get                = i40e_flow_ctrl_get,
        .flow_ctrl_set                = i40e_flow_ctrl_set,
        .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
        .mac_addr_add                 = i40e_macaddr_add,
        .mac_addr_remove              = i40e_macaddr_remove,
        .reta_update                  = i40e_dev_rss_reta_update,
        .reta_query                   = i40e_dev_rss_reta_query,
        .rss_hash_update              = i40e_dev_rss_hash_update,
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
        .filter_ctrl                  = i40e_dev_filter_ctrl,
        .rxq_info_get                 = i40e_rxq_info_get,
        .txq_info_get                 = i40e_txq_info_get,
        .mirror_rule_set              = i40e_mirror_rule_set,
        .mirror_rule_reset            = i40e_mirror_rule_reset,
        .timesync_enable              = i40e_timesync_enable,
        .timesync_disable             = i40e_timesync_disable,
        .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
        .get_dcb_info                 = i40e_dev_get_dcb_info,
        .timesync_adjust_time         = i40e_timesync_adjust_time,
        .timesync_read_time           = i40e_timesync_read_time,
        .timesync_write_time          = i40e_timesync_write_time,
        .get_reg                      = i40e_get_regs,
        .get_eeprom_length            = i40e_get_eeprom_length,
        .get_eeprom                   = i40e_get_eeprom,
        .mac_addr_set                 = i40e_set_default_mac_addr,
        .mtu_set                      = i40e_dev_mtu_set,
        .tm_ops_get                   = i40e_tm_ops_get,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
        {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
        {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
        {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
        {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
        {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
                rx_unknown_protocol)},
        {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
        {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
        {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
        {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
                sizeof(rte_i40e_stats_strings[0]))
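
/*
 * The name/offset tables above and below are consumed generically by the
 * xstats handlers; for entry i, the counter is read from the matching
 * stats structure roughly as (a sketch):
 *
 *     *(uint64_t *)(((char *)stats) + table[i].offset)
 */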

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
        {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
                tx_dropped_link_down)},
        {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
        {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
                illegal_bytes)},
        {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
        {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
                mac_local_faults)},
        {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
                mac_remote_faults)},
        {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
                rx_length_errors)},
        {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
        {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
        {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
        {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
        {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
        {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_127)},
        {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_255)},
        {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_511)},
        {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1023)},
        {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1522)},
        {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_big)},
        {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
                rx_undersize)},
        {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
                rx_oversize)},
        {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
                mac_short_packet_dropped)},
        {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
                rx_fragments)},
        {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
        {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
        {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_127)},
        {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_255)},
        {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_511)},
        {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1023)},
        {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1522)},
        {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_big)},
        {"rx_flow_director_atr_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_atr_match)},
        {"rx_flow_director_sb_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_sb_match)},
        {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                tx_lpi_status)},
        {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                rx_lpi_status)},
        {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                tx_lpi_count)},
        {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
                sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_rx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
                sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_tx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_tx)},
        {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
                sizeof(rte_i40e_txq_prio_strings[0]))

static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct i40e_adapter), eth_i40e_dev_init);
}

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
        .id_table = pci_id_i40e_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                     RTE_PCI_DRV_IOVA_AS_VA,
        .probe = eth_i40e_pci_probe,
        .remove = eth_i40e_pci_remove,
};

static inline void
i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
        i40e_write_rx_ctl(hw, reg_addr, reg_val);
        PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
                    "with value 0x%08x",
                    reg_addr, reg_val);
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
        /*
         * Initialize registers for parsing the packet type of QinQ.
         * This should be removed once a proper configuration API is
         * added, to avoid configuration conflicts between ports of
         * the same device.
         */
        I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
        I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
        i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
}
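
/*
 * The writes in i40e_GLQF_reg_init() above touch device-global registers
 * shared by every port (and by any other driver bound to the same NIC),
 * which is why eth_i40e_dev_init() below only calls it when multi-driver
 * support is disabled.
 */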

static inline void i40e_config_automask(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t val;

        /* The INTENA flag is not auto-cleared on interrupt, so disable
         * auto-masking.
         */
        val = I40E_READ_REG(hw, I40E_GLINT_CTL);
        val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
                I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;

        /* When multi-driver support is enabled, the PF uses INT0. */
        if (!pf->support_multi_driver)
                val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

        I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs. (Ethertype 0x8808 is the IEEE 802.3 MAC control ethertype
 * used by pause frames.)
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
        int ret;

        ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
                                I40E_FLOW_CONTROL_ETHERTYPE, flags,
                                pf->main_vsi_seid, 0,
                                TRUE, NULL, NULL);
        if (ret)
                PMD_INIT_LOG(ERR,
                        "Failed to add filter to drop flow control frames from VSIs.");
}

static int
floating_veb_list_handler(__rte_unused const char *key,
                          const char *floating_veb_value,
                          void *opaque)
{
        int idx = 0;
        unsigned int count = 0;
        char *end = NULL;
        int min, max;
        bool *vf_floating_veb = opaque;

        while (isblank(*floating_veb_value))
                floating_veb_value++;

        /* Reset floating VEB configuration for VFs */
        for (idx = 0; idx < I40E_MAX_VF; idx++)
                vf_floating_veb[idx] = false;

        min = I40E_MAX_VF;
        do {
                while (isblank(*floating_veb_value))
                        floating_veb_value++;
                if (*floating_veb_value == '\0')
                        return -1;
                errno = 0;
                idx = strtoul(floating_veb_value, &end, 10);
                if (errno || end == NULL)
                        return -1;
                while (isblank(*end))
                        end++;
                if (*end == '-') {
                        min = idx;
                } else if ((*end == ';') || (*end == '\0')) {
                        max = idx;
                        if (min == I40E_MAX_VF)
                                min = idx;
                        if (max >= I40E_MAX_VF)
                                max = I40E_MAX_VF - 1;
                        for (idx = min; idx <= max; idx++) {
                                vf_floating_veb[idx] = true;
                                count++;
                        }
                        min = I40E_MAX_VF;
                } else {
                        return -1;
                }
                floating_veb_value = end + 1;
        } while (*end != '\0');

        if (count == 0)
                return -1;

        return 0;
}
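
/*
 * The list syntax accepted above is semicolon-separated VF ids with
 * optional '-' ranges; e.g. floating_veb_list=0;3-5 marks VFs 0, 3, 4
 * and 5 for the floating VEB.
 */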

static void
config_vf_floating_veb(struct rte_devargs *devargs,
                       uint16_t floating_veb,
                       bool *vf_floating_veb)
{
        struct rte_kvargs *kvlist;
        int i;
        const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

        if (!floating_veb)
                return;
        /* All VFs attach to the floating VEB by default
         * when the floating VEB is enabled.
         */
        for (i = 0; i < I40E_MAX_VF; i++)
                vf_floating_veb[i] = true;

        if (devargs == NULL)
                return;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return;

        if (!rte_kvargs_count(kvlist, floating_veb_list)) {
                rte_kvargs_free(kvlist);
                return;
        }
        /* When the floating_veb_list parameter is present, all VFs
         * first attach to the legacy VEB; those named in the list are
         * then moved to the floating VEB.
         */
        if (rte_kvargs_process(kvlist, floating_veb_list,
                               floating_veb_list_handler,
                               vf_floating_veb) < 0) {
                rte_kvargs_free(kvlist);
                return;
        }
        rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
                            const char *value,
                            __rte_unused void *opaque)
{
        if (strcmp(value, "1"))
                return -1;

        return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, floating_veb_key)) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        /* Floating VEB is enabled when there's key-value:
         * enable_floating_veb=1
         */
        if (rte_kvargs_process(kvlist, floating_veb_key,
                               i40e_check_floating_handler, NULL) < 0) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        rte_kvargs_free(kvlist);

        return 1;
}
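
/*
 * Floating VEB is thus opt-in per device through devargs, e.g. (EAL
 * whitelist syntax of this DPDK generation, PCI address illustrative):
 *     -w 0000:02:00.0,enable_floating_veb=1
 */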

static void
config_floating_veb(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

        if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
                pf->floating_veb =
                        is_floating_veb_supported(pci_dev->device.devargs);
                config_vf_floating_veb(pci_dev->device.devargs,
                                       pf->floating_veb,
                                       pf->floating_veb_list);
        } else {
                pf->floating_veb = false;
        }
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

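/*
 * Each software filter family below (ethertype, tunnel, flow director)
 * keeps the same pair of structures: a TAILQ for ordered traversal and
 * restore after reset, plus an rte_hash keyed on the filter input for
 * constant-time duplicate detection and lookup.
 */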
static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
        char ethertype_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters ethertype_hash_params = {
                .name = ethertype_hash_name,
                .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
                .key_len = sizeof(struct i40e_ethertype_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize ethertype filter rule list and hash */
        TAILQ_INIT(&ethertype_rule->ethertype_list);
        snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
                 "ethertype_%s", dev->device->name);
        ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
        if (!ethertype_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
                return -EINVAL;
        }
        ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
                                       sizeof(struct i40e_ethertype_filter *) *
                                       I40E_MAX_ETHERTYPE_FILTER_NUM,
                                       0);
        if (!ethertype_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for ethertype hash map!");
                ret = -ENOMEM;
                goto err_ethertype_hash_map_alloc;
        }

        return 0;

err_ethertype_hash_map_alloc:
        rte_hash_free(ethertype_rule->hash_table);

        return ret;
}

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
        char tunnel_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters tunnel_hash_params = {
                .name = tunnel_hash_name,
                .entries = I40E_MAX_TUNNEL_FILTER_NUM,
                .key_len = sizeof(struct i40e_tunnel_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize tunnel filter rule list and hash */
        TAILQ_INIT(&tunnel_rule->tunnel_list);
        snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
                 "tunnel_%s", dev->device->name);
        tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
        if (!tunnel_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
                return -EINVAL;
        }
        tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
                                    sizeof(struct i40e_tunnel_filter *) *
                                    I40E_MAX_TUNNEL_FILTER_NUM,
                                    0);
        if (!tunnel_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for tunnel hash map!");
                ret = -ENOMEM;
                goto err_tunnel_hash_map_alloc;
        }

        return 0;

err_tunnel_hash_map_alloc:
        rte_hash_free(tunnel_rule->hash_table);

        return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = I40E_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct i40e_fdir_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize flow director filter rule list and hash */
        TAILQ_INIT(&fdir_info->fdir_list);
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
                                          sizeof(struct i40e_fdir_filter *) *
                                          I40E_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

static void
i40e_init_customized_info(struct i40e_pf *pf)
{
        int i;

        /* Initialize customized pctype */
        for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
                pf->customized_pctype[i].index = i;
                pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
                pf->customized_pctype[i].valid = false;
        }

        pf->gtp_support = false;
}

void
i40e_init_queue_region_conf(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_queue_regions *info = &pf->queue_region;
        uint16_t i;

        for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
                i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);

        memset(info, 0, sizeof(struct i40e_queue_regions));
}

#define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"

static int
i40e_parse_multi_drv_handler(__rte_unused const char *key,
                               const char *value,
                               void *opaque)
{
        struct i40e_pf *pf;
        unsigned long support_multi_driver;
        char *end;

        pf = (struct i40e_pf *)opaque;

        errno = 0;
        support_multi_driver = strtoul(value, &end, 10);
        if (errno != 0 || end == value || *end != 0) {
                PMD_DRV_LOG(WARNING, "Wrong global configuration");
                return -(EINVAL);
        }

        if (support_multi_driver == 1 || support_multi_driver == 0)
                pf->support_multi_driver = (bool)support_multi_driver;
        else
                PMD_DRV_LOG(WARNING,
                            "%s must be 1 or 0; keeping global configuration enabled by default",
                            ETH_I40E_SUPPORT_MULTI_DRIVER);
        return 0;
}

static int
i40e_support_multi_driver(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        static const char *const valid_keys[] = {
                ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
        struct rte_kvargs *kvlist;

        /* Enable global configuration by default */
        pf->support_multi_driver = false;

        if (!dev->device->devargs)
                return 0;

        kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
        if (!kvlist)
                return -EINVAL;

        if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
                PMD_DRV_LOG(WARNING, "Argument \"%s\" was given more than "
                            "once; only the first invalid or the last valid "
                            "value takes effect!",
                            ETH_I40E_SUPPORT_MULTI_DRIVER);

        if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
                               i40e_parse_multi_drv_handler, pf) < 0) {
                rte_kvargs_free(kvlist);
                return -EINVAL;
        }

        rte_kvargs_free(kvlist);
        return 0;
}
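
/*
 * Multi-driver support is likewise requested through devargs, e.g. (PCI
 * address illustrative):
 *     -w 0000:02:00.0,support-multi-driver=1
 * When set, the PMD refrains from touching the device-global registers
 * configured elsewhere in this file.
 */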
1091
1092 static int
1093 eth_i40e_dev_init(struct rte_eth_dev *dev)
1094 {
1095         struct rte_pci_device *pci_dev;
1096         struct rte_intr_handle *intr_handle;
1097         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1098         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1099         struct i40e_vsi *vsi;
1100         int ret;
1101         uint32_t len;
1102         uint8_t aq_fail = 0;
1103
1104         PMD_INIT_FUNC_TRACE();
1105
1106         dev->dev_ops = &i40e_eth_dev_ops;
1107         dev->rx_pkt_burst = i40e_recv_pkts;
1108         dev->tx_pkt_burst = i40e_xmit_pkts;
1109         dev->tx_pkt_prepare = i40e_prep_pkts;
1110
1111         /* for secondary processes, we don't initialise any further as primary
1112          * has already done this work. Only check we don't need a different
1113          * RX function */
1114         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1115                 i40e_set_rx_function(dev);
1116                 i40e_set_tx_function(dev);
1117                 return 0;
1118         }
1119         i40e_set_default_ptype_table(dev);
1120         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1121         intr_handle = &pci_dev->intr_handle;
1122
1123         rte_eth_copy_pci_info(dev, pci_dev);
1124
1125         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1126         pf->adapter->eth_dev = dev;
1127         pf->dev_data = dev->data;
1128
1129         hw->back = I40E_PF_TO_ADAPTER(pf);
1130         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1131         if (!hw->hw_addr) {
1132                 PMD_INIT_LOG(ERR,
1133                         "Hardware is not available, as address is NULL");
1134                 return -ENODEV;
1135         }
1136
1137         hw->vendor_id = pci_dev->id.vendor_id;
1138         hw->device_id = pci_dev->id.device_id;
1139         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1140         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1141         hw->bus.device = pci_dev->addr.devid;
1142         hw->bus.func = pci_dev->addr.function;
1143         hw->adapter_stopped = 0;
1144
1145         /* Check if need to support multi-driver */
1146         i40e_support_multi_driver(dev);
1147
1148         /* Make sure all is clean before doing PF reset */
1149         i40e_clear_hw(hw);
1150
1151         /* Initialize the hardware */
1152         i40e_hw_init(dev);
1153
1154         /* Reset here to make sure all is clean for each PF */
1155         ret = i40e_pf_reset(hw);
1156         if (ret) {
1157                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1158                 return ret;
1159         }
1160
1161         /* Initialize the shared code (base driver) */
1162         ret = i40e_init_shared_code(hw);
1163         if (ret) {
1164                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1165                 return ret;
1166         }
1167
1168         i40e_config_automask(pf);
1169
1170         i40e_set_default_pctype_table(dev);
1171
1172         /*
1173          * To work around the NVM issue, initialize registers
1174          * for packet type of QinQ by software.
1175          * It should be removed once issues are fixed in NVM.
1176          */
1177         if (!pf->support_multi_driver)
1178                 i40e_GLQF_reg_init(hw);
1179
1180         /* Initialize the input set for filters (hash and fd) to default value */
1181         i40e_filter_input_set_init(pf);
1182
1183         /* Initialize the parameters for adminq */
1184         i40e_init_adminq_parameter(hw);
1185         ret = i40e_init_adminq(hw);
1186         if (ret != I40E_SUCCESS) {
1187                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1188                 return -EIO;
1189         }
1190         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1191                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1192                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1193                      ((hw->nvm.version >> 12) & 0xf),
1194                      ((hw->nvm.version >> 4) & 0xff),
1195                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1196
1197         /* initialise the L3_MAP register */
1198         if (!pf->support_multi_driver) {
1199                 ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
1200                                                    0x00000028,  NULL);
1201                 if (ret)
1202                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1203                                      ret);
1204                 PMD_INIT_LOG(DEBUG,
1205                              "Global register 0x%08x is changed with 0x28",
1206                              I40E_GLQF_L3_MAP(40));
1207                 i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER);
1208         }
1209
1210         /* Need the special FW version to support floating VEB */
1211         config_floating_veb(dev);
1212         /* Clear PXE mode */
1213         i40e_clear_pxe_mode(hw);
1214         i40e_dev_sync_phy_type(hw);
1215
1216         /*
1217          * On X710, performance numbers fall far short of expectations on
1218          * recent firmware versions, and the fix may not be integrated in
1219          * the next firmware release, so a workaround in the software
1220          * driver is needed: it modifies the initial values of 3
1221          * internal-only registers. Note that the workaround can be
1222          * removed once this is fixed in firmware.
1223          */
1224         i40e_configure_registers(hw);
1225
1226         /* Get hw capabilities */
1227         ret = i40e_get_cap(hw);
1228         if (ret != I40E_SUCCESS) {
1229                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1230                 goto err_get_capabilities;
1231         }
1232
1233         /* Initialize parameters for PF */
1234         ret = i40e_pf_parameter_init(dev);
1235         if (ret != 0) {
1236                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1237                 goto err_parameter_init;
1238         }
1239
1240         /* Initialize the queue management */
1241         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1242         if (ret < 0) {
1243                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1244                 goto err_qp_pool_init;
1245         }
1246         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1247                                 hw->func_caps.num_msix_vectors - 1);
1248         if (ret < 0) {
1249                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1250                 goto err_msix_pool_init;
1251         }
1252
1253         /* Initialize lan hmc */
1254         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1255                                 hw->func_caps.num_rx_qp, 0, 0);
1256         if (ret != I40E_SUCCESS) {
1257                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1258                 goto err_init_lan_hmc;
1259         }
1260
1261         /* Configure lan hmc */
1262         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1263         if (ret != I40E_SUCCESS) {
1264                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1265                 goto err_configure_lan_hmc;
1266         }
1267
1268         /* Get and check the mac address */
1269         i40e_get_mac_addr(hw, hw->mac.addr);
1270         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1271                 PMD_INIT_LOG(ERR, "mac address is not valid");
1272                 ret = -EIO;
1273                 goto err_get_mac_addr;
1274         }
1275         /* Copy the permanent MAC address */
1276         ether_addr_copy((struct ether_addr *) hw->mac.addr,
1277                         (struct ether_addr *) hw->mac.perm_addr);
1278
1279         /* Disable flow control */
1280         hw->fc.requested_mode = I40E_FC_NONE;
1281         i40e_set_fc(hw, &aq_fail, TRUE);
1282
1283         /* Set the global registers with default ether type value */
1284         if (!pf->support_multi_driver) {
1285                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1286                                          ETHER_TYPE_VLAN);
1287                 if (ret != I40E_SUCCESS) {
1288                         PMD_INIT_LOG(ERR,
1289                                      "Failed to set the default outer "
1290                                      "VLAN ether type");
1291                         goto err_setup_pf_switch;
1292                 }
1293         }
1294
1295         /* PF setup, which includes VSI setup */
1296         ret = i40e_pf_setup(pf);
1297         if (ret) {
1298                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1299                 goto err_setup_pf_switch;
1300         }
1301
1302         /* reset all stats of the device, including pf and main vsi */
1303         i40e_dev_stats_reset(dev);
1304
1305         vsi = pf->main_vsi;
1306
1307         /* Disable double vlan by default */
1308         i40e_vsi_config_double_vlan(vsi, FALSE);
1309
1310         /* Disable S-TAG identification when floating_veb is disabled */
1311         if (!pf->floating_veb) {
1312                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1313                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1314                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1315                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1316                 }
1317         }
1318
1319         if (!vsi->max_macaddrs)
1320                 len = ETHER_ADDR_LEN;
1321         else
1322                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1323
1324         /* Should be after VSI initialized */
1325         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1326         if (!dev->data->mac_addrs) {
1327                 PMD_INIT_LOG(ERR,
1328                 "Failed to allocate memory for storing MAC address");
1329                 goto err_mac_alloc;
1330         }
1331         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1332                                         &dev->data->mac_addrs[0]);
1333
1334         /* Init dcb to sw mode by default */
1335         ret = i40e_dcb_init_configure(dev, TRUE);
1336         if (ret != I40E_SUCCESS) {
1337                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1338                 pf->flags &= ~I40E_FLAG_DCB;
1339         }
1340         /* Update HW struct after DCB configuration */
1341         i40e_get_cap(hw);
1342
1343         /* initialize pf host driver to setup SRIOV resource if applicable */
1344         i40e_pf_host_init(dev);
1345
1346         /* register callback func to eal lib */
1347         rte_intr_callback_register(intr_handle,
1348                                    i40e_dev_interrupt_handler, dev);
1349
1350         /* configure and enable device interrupt */
1351         i40e_pf_config_irq0(hw, TRUE);
1352         i40e_pf_enable_irq0(hw);
1353
1354         /* enable uio intr after callback register */
1355         rte_intr_enable(intr_handle);
1356
1357         /* By default disable flexible payload in global configuration */
1358         if (!pf->support_multi_driver)
1359                 i40e_flex_payload_reg_set_default(hw);
1360
1361         /*
1362          * Add an ethertype filter to drop all flow control frames transmitted
1363          * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1364          * frames to wire.
1365          */
1366         i40e_add_tx_flow_control_drop_filter(pf);
1367
1368         /* Set the max frame size to 0x2600 by default,
1369          * in case other drivers changed the default value.
1370          */
1371         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1372
1373         /* initialize mirror rule list */
1374         TAILQ_INIT(&pf->mirror_list);
1375
1376         /* initialize Traffic Manager configuration */
1377         i40e_tm_conf_init(dev);
1378
1379         /* Initialize customized information */
1380         i40e_init_customized_info(pf);
1381
1382         ret = i40e_init_ethtype_filter_list(dev);
1383         if (ret < 0)
1384                 goto err_init_ethtype_filter_list;
1385         ret = i40e_init_tunnel_filter_list(dev);
1386         if (ret < 0)
1387                 goto err_init_tunnel_filter_list;
1388         ret = i40e_init_fdir_filter_list(dev);
1389         if (ret < 0)
1390                 goto err_init_fdir_filter_list;
1391
1392         /* initialize queue region configuration */
1393         i40e_init_queue_region_conf(dev);
1394
1395         /* initialize rss configuration from rte_flow */
1396         memset(&pf->rss_info, 0,
1397                 sizeof(struct i40e_rte_flow_rss_conf));
1398
1399         return 0;
1400
1401 err_init_fdir_filter_list:
1402         rte_free(pf->tunnel.hash_table);
1403         rte_free(pf->tunnel.hash_map);
1404 err_init_tunnel_filter_list:
1405         rte_free(pf->ethertype.hash_table);
1406         rte_free(pf->ethertype.hash_map);
1407 err_init_ethtype_filter_list:
1408         rte_free(dev->data->mac_addrs);
1409 err_mac_alloc:
1410         i40e_vsi_release(pf->main_vsi);
1411 err_setup_pf_switch:
1412 err_get_mac_addr:
1413 err_configure_lan_hmc:
1414         (void)i40e_shutdown_lan_hmc(hw);
1415 err_init_lan_hmc:
1416         i40e_res_pool_destroy(&pf->msix_pool);
1417 err_msix_pool_init:
1418         i40e_res_pool_destroy(&pf->qp_pool);
1419 err_qp_pool_init:
1420 err_parameter_init:
1421 err_get_capabilities:
1422         (void)i40e_shutdown_adminq(hw);
1423
1424         return ret;
1425 }
1426
1427 static void
1428 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1429 {
1430         struct i40e_ethertype_filter *p_ethertype;
1431         struct i40e_ethertype_rule *ethertype_rule;
1432
1433         ethertype_rule = &pf->ethertype;
1434         /* Remove all ethertype filter rules and hash */
1435         if (ethertype_rule->hash_map)
1436                 rte_free(ethertype_rule->hash_map);
1437         if (ethertype_rule->hash_table)
1438                 rte_hash_free(ethertype_rule->hash_table);
1439
1440         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1441                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1442                              p_ethertype, rules);
1443                 rte_free(p_ethertype);
1444         }
1445 }
1446
1447 static void
1448 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1449 {
1450         struct i40e_tunnel_filter *p_tunnel;
1451         struct i40e_tunnel_rule *tunnel_rule;
1452
1453         tunnel_rule = &pf->tunnel;
1454         /* Remove all tunnel filter rules and hash */
1455         if (tunnel_rule->hash_map)
1456                 rte_free(tunnel_rule->hash_map);
1457         if (tunnel_rule->hash_table)
1458                 rte_hash_free(tunnel_rule->hash_table);
1459
1460         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1461                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1462                 rte_free(p_tunnel);
1463         }
1464 }
1465
1466 static void
1467 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1468 {
1469         struct i40e_fdir_filter *p_fdir;
1470         struct i40e_fdir_info *fdir_info;
1471
1472         fdir_info = &pf->fdir;
1473         /* Remove all flow director rules and hash */
1474         if (fdir_info->hash_map)
1475                 rte_free(fdir_info->hash_map);
1476         if (fdir_info->hash_table)
1477                 rte_hash_free(fdir_info->hash_table);
1478
1479         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1480                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1481                 rte_free(p_fdir);
1482         }
1483 }
1484
1485 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1486 {
1487         /*
1488          * Disable flexible payload by default for the
1489          * corresponding L2/L3/L4 layers.
1490          */
1491         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1492         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1493         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1494         i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD);
1495 }
1496
1497 static int
1498 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1499 {
1500         struct i40e_pf *pf;
1501         struct rte_pci_device *pci_dev;
1502         struct rte_intr_handle *intr_handle;
1503         struct i40e_hw *hw;
1504         struct i40e_filter_control_settings settings;
1505         struct rte_flow *p_flow;
1506         int ret;
1507         uint8_t aq_fail = 0;
1508
1509         PMD_INIT_FUNC_TRACE();
1510
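             /* Only the primary process may tear down shared device state;
              * secondary processes return without touching it. */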
1511         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1512                 return 0;
1513
1514         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1515         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1516         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1517         intr_handle = &pci_dev->intr_handle;
1518
1519         if (hw->adapter_stopped == 0)
1520                 i40e_dev_close(dev);
1521
1522         dev->dev_ops = NULL;
1523         dev->rx_pkt_burst = NULL;
1524         dev->tx_pkt_burst = NULL;
1525
1526         /* Clear PXE mode */
1527         i40e_clear_pxe_mode(hw);
1528
1529         /* Unconfigure filter control */
1530         memset(&settings, 0, sizeof(settings));
1531         ret = i40e_set_filter_control(hw, &settings);
1532         if (ret)
1533                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1534                                         ret);
1535
1536         /* Disable flow control */
1537         hw->fc.requested_mode = I40E_FC_NONE;
1538         i40e_set_fc(hw, &aq_fail, TRUE);
1539
1540         /* uninitialize pf host driver */
1541         i40e_pf_host_uninit(dev);
1542
1543         rte_free(dev->data->mac_addrs);
1544         dev->data->mac_addrs = NULL;
1545
1546         /* disable uio intr before callback unregister */
1547         rte_intr_disable(intr_handle);
1548
1549         /* unregister callback func from eal lib */
1550         rte_intr_callback_unregister(intr_handle,
1551                                      i40e_dev_interrupt_handler, dev);
1552
1553         i40e_rm_ethtype_filter_list(pf);
1554         i40e_rm_tunnel_filter_list(pf);
1555         i40e_rm_fdir_filter_list(pf);
1556
1557         /* Remove all flows */
1558         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
1559                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
1560                 rte_free(p_flow);
1561         }
1562
1563         /* Remove all Traffic Manager configuration */
1564         i40e_tm_conf_uninit(dev);
1565
1566         return 0;
1567 }
1568
1569 static int
1570 i40e_dev_configure(struct rte_eth_dev *dev)
1571 {
1572         struct i40e_adapter *ad =
1573                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1574         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1575         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1576         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1577         int i, ret;
1578
1579         ret = i40e_dev_sync_phy_type(hw);
1580         if (ret)
1581                 return ret;
1582
1583         /* Initialize to TRUE. If any Rx queue doesn't meet the
1584          * bulk allocation or vector Rx preconditions, it will be reset.
1585          */
1586         ad->rx_bulk_alloc_allowed = true;
1587         ad->rx_vec_allowed = true;
1588         ad->tx_simple_allowed = true;
1589         ad->tx_vec_allowed = true;
1590
1591         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1592                 ret = i40e_fdir_setup(pf);
1593                 if (ret != I40E_SUCCESS) {
1594                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1595                         return -ENOTSUP;
1596                 }
1597                 ret = i40e_fdir_configure(dev);
1598                 if (ret < 0) {
1599                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1600                         goto err;
1601                 }
1602         } else
1603                 i40e_fdir_teardown(pf);
1604
1605         ret = i40e_dev_init_vlan(dev);
1606         if (ret < 0)
1607                 goto err;
1608
1609         /* VMDQ setup.
1610          *  VMDQ setup is done here rather than in i40e_pf_config_mq_rx()
1611          *  because VMDQ and RSS settings have different requirements.
1612          *  The general PMD call sequence is NIC init, configure,
1613          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() will look
1614          *  up the VSI that a specific queue belongs to when VMDQ is
1615          *  applicable, so VMDQ setup has to be done before
1616          *  rx/tx_queue_setup(); this function is a good place for it.
1617          *  RSS setup needs the actual configured Rx queue number, which
1618          *  is only available after rx_queue_setup(), so dev_start() is a
1619          *  good place for RSS setup.
1620          */
1621         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1622                 ret = i40e_vmdq_setup(dev);
1623                 if (ret)
1624                         goto err;
1625         }
1626
1627         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1628                 ret = i40e_dcb_setup(dev);
1629                 if (ret) {
1630                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1631                         goto err_dcb;
1632                 }
1633         }
1634
1635         TAILQ_INIT(&pf->flow_list);
1636
1637         return 0;
1638
1639 err_dcb:
1640         /* need to release vmdq resource if exists */
1641         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1642                 i40e_vsi_release(pf->vmdq[i].vsi);
1643                 pf->vmdq[i].vsi = NULL;
1644         }
1645         rte_free(pf->vmdq);
1646         pf->vmdq = NULL;
1647 err:
1648         /* need to release fdir resource if exists */
1649         i40e_fdir_teardown(pf);
1650         return ret;
1651 }
1652
1653 void
1654 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1655 {
1656         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1657         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1658         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1659         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1660         uint16_t msix_vect = vsi->msix_intr;
1661         uint16_t i;
1662
1663         for (i = 0; i < vsi->nb_qps; i++) {
1664                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1665                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1666                 rte_wmb();
1667         }
1668
1669         if (vsi->type != I40E_VSI_SRIOV) {
1670                 if (!rte_intr_allow_others(intr_handle)) {
1671                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1672                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1673                         I40E_WRITE_REG(hw,
1674                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1675                                        0);
1676                 } else {
1677                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1678                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1679                         I40E_WRITE_REG(hw,
1680                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1681                                                        msix_vect - 1), 0);
1682                 }
1683         } else {
1684                 uint32_t reg;
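                     /*
                      * VPINT_LNKLSTN is indexed over per-VF data vectors only;
                      * vector 0 (the misc/mailbox irq) has no entry, hence both
                      * "- 1" terms below. vsi->user_param is read here as the
                      * VF index for SRIOV VSIs (an assumption from its usage).
                      */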
1685                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1686                         vsi->user_param + (msix_vect - 1);
1687
1688                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1689                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1690         }
1691         I40E_WRITE_FLUSH(hw);
1692 }
1693
1694 static void
1695 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1696                        int base_queue, int nb_queue,
1697                        uint16_t itr_idx)
1698 {
1699         int i;
1700         uint32_t val;
1701         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1702         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1703
1704         /* Bind all RX queues to allocated MSIX interrupt */
1705         for (i = 0; i < nb_queue; i++) {
1706                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1707                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
1708                         ((base_queue + i + 1) <<
1709                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1710                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1711                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1712
1713                 if (i == nb_queue - 1)
1714                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1715                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1716         }
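             /*
              * The RQCTL writes above chain the queues into a linked list:
              * each entry's NEXTQ_INDX points at the following queue, and
              * the last entry is terminated with NEXTQ_INDX_MASK.
              */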
1717
1718         /* Write first RX queue to Link list register as the head element */
1719         if (vsi->type != I40E_VSI_SRIOV) {
1720                 uint16_t interval =
1721                         i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1,
1722                                                pf->support_multi_driver);
1723
1724                 if (msix_vect == I40E_MISC_VEC_ID) {
1725                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1726                                        (base_queue <<
1727                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1728                                        (0x0 <<
1729                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1730                         I40E_WRITE_REG(hw,
1731                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1732                                        interval);
1733                 } else {
1734                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1735                                        (base_queue <<
1736                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1737                                        (0x0 <<
1738                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1739                         I40E_WRITE_REG(hw,
1740                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1741                                                        msix_vect - 1),
1742                                        interval);
1743                 }
1744         } else {
1745                 uint32_t reg;
1746
1747                 if (msix_vect == I40E_MISC_VEC_ID) {
1748                         I40E_WRITE_REG(hw,
1749                                        I40E_VPINT_LNKLST0(vsi->user_param),
1750                                        (base_queue <<
1751                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1752                                        (0x0 <<
1753                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1754                 } else {
1755                         /* num_msix_vectors_vf includes irq0, so subtract it here */
1756                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1757                                 vsi->user_param + (msix_vect - 1);
1758
1759                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1760                                        (base_queue <<
1761                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1762                                        (0x0 <<
1763                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1764                 }
1765         }
1766
1767         I40E_WRITE_FLUSH(hw);
1768 }
1769
1770 void
1771 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
1772 {
1773         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1774         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1775         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1776         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1777         uint16_t msix_vect = vsi->msix_intr;
1778         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1779         uint16_t queue_idx = 0;
1780         int record = 0;
1781         int i;
1782
1783         for (i = 0; i < vsi->nb_qps; i++) {
1784                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1785                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1786         }
1787
1788         /* VF bind interrupt */
1789         if (vsi->type == I40E_VSI_SRIOV) {
1790                 __vsi_queues_bind_intr(vsi, msix_vect,
1791                                        vsi->base_queue, vsi->nb_qps,
1792                                        itr_idx);
1793                 return;
1794         }
1795
1796         /* PF & VMDq bind interrupt */
1797         if (rte_intr_dp_is_en(intr_handle)) {
1798                 if (vsi->type == I40E_VSI_MAIN) {
1799                         queue_idx = 0;
1800                         record = 1;
1801                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1802                         struct i40e_vsi *main_vsi =
1803                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1804                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1805                         record = 1;
1806                 }
1807         }
1808
1809         for (i = 0; i < vsi->nb_used_qps; i++) {
1810                 if (nb_msix <= 1) {
1811                         if (!rte_intr_allow_others(intr_handle))
1812                                 /* allow to share MISC_VEC_ID */
1813                                 msix_vect = I40E_MISC_VEC_ID;
1814
1815                         /* not enough MSI-X vectors, map all remaining queues to one */
1816                         __vsi_queues_bind_intr(vsi, msix_vect,
1817                                                vsi->base_queue + i,
1818                                                vsi->nb_used_qps - i,
1819                                                itr_idx);
1820                         for (; !!record && i < vsi->nb_used_qps; i++)
1821                                 intr_handle->intr_vec[queue_idx + i] =
1822                                         msix_vect;
1823                         break;
1824                 }
1825                 /* 1:1 queue/msix_vect mapping */
1826                 __vsi_queues_bind_intr(vsi, msix_vect,
1827                                        vsi->base_queue + i, 1,
1828                                        itr_idx);
1829                 if (!!record)
1830                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1831
1832                 msix_vect++;
1833                 nb_msix--;
1834         }
1835 }
1836
1837 static void
1838 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1839 {
1840         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1841         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1842         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1843         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1844         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1845         uint16_t msix_intr, i;
1846
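             /*
              * When other interrupt vectors are allowed and multi-driver
              * mode is off, each queue vector is enabled through its own
              * DYN_CTLN register; otherwise everything shares the misc
              * vector and DYN_CTL0 is used instead.
              */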
1847         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
1848                 for (i = 0; i < vsi->nb_msix; i++) {
1849                         msix_intr = vsi->msix_intr + i;
1850                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1851                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1852                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1853                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
1854                 }
1855         else
1856                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1857                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1858                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1859                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
1860
1861         I40E_WRITE_FLUSH(hw);
1862 }
1863
1864 static void
1865 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1866 {
1867         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1868         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1869         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1870         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1871         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1872         uint16_t msix_intr, i;
1873
1874         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
1875                 for (i = 0; i < vsi->nb_msix; i++) {
1876                         msix_intr = vsi->msix_intr + i;
1877                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1878                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
1879                 }
1880         else
1881                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1882                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
1883
1884         I40E_WRITE_FLUSH(hw);
1885 }
1886
1887 static inline uint8_t
1888 i40e_parse_link_speeds(uint16_t link_speeds)
1889 {
1890         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1891
1892         if (link_speeds & ETH_LINK_SPEED_40G)
1893                 link_speed |= I40E_LINK_SPEED_40GB;
1894         if (link_speeds & ETH_LINK_SPEED_25G)
1895                 link_speed |= I40E_LINK_SPEED_25GB;
1896         if (link_speeds & ETH_LINK_SPEED_20G)
1897                 link_speed |= I40E_LINK_SPEED_20GB;
1898         if (link_speeds & ETH_LINK_SPEED_10G)
1899                 link_speed |= I40E_LINK_SPEED_10GB;
1900         if (link_speeds & ETH_LINK_SPEED_1G)
1901                 link_speed |= I40E_LINK_SPEED_1GB;
1902         if (link_speeds & ETH_LINK_SPEED_100M)
1903                 link_speed |= I40E_LINK_SPEED_100MB;
1904
1905         return link_speed;
1906 }
1907
1908 static int
1909 i40e_phy_conf_link(struct i40e_hw *hw,
1910                    uint8_t abilities,
1911                    uint8_t force_speed,
1912                    bool is_up)
1913 {
1914         enum i40e_status_code status;
1915         struct i40e_aq_get_phy_abilities_resp phy_ab;
1916         struct i40e_aq_set_phy_config phy_conf;
1917         enum i40e_aq_phy_type cnt;
1918         uint32_t phy_type_mask = 0;
1919
1920         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
1921                         I40E_AQ_PHY_FLAG_PAUSE_RX |
1923                         I40E_AQ_PHY_FLAG_LOW_POWER;
1924         const uint8_t advt = I40E_LINK_SPEED_40GB |
1925                         I40E_LINK_SPEED_25GB |
1926                         I40E_LINK_SPEED_10GB |
1927                         I40E_LINK_SPEED_1GB |
1928                         I40E_LINK_SPEED_100MB;
1929         int ret = -ENOTSUP;
1930
1932         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
1933                                               NULL);
1934         if (status)
1935                 return ret;
1936
1937         /* If link already up, no need to set up again */
1938         if (is_up && phy_ab.phy_type != 0)
1939                 return I40E_SUCCESS;
1940
1941         memset(&phy_conf, 0, sizeof(phy_conf));
1942
1943         /* bits 0-2 use the values from get_phy_abilities_resp */
1944         abilities &= ~mask;
1945         abilities |= phy_ab.abilities & mask;
1946
1947         /* update abilities and speed */
1948         if (abilities & I40E_AQ_PHY_AN_ENABLED)
1949                 phy_conf.link_speed = advt;
1950         else
1951                 phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
1952
1953         phy_conf.abilities = abilities;
1954
1957         /* To enable link, phy_type mask needs to include each type */
1958         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
1959                 phy_type_mask |= 1 << cnt;
1960
1961         /* use get_phy_abilities_resp value for the rest */
1962         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
1963         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
1964                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
1965                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
1966         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
1967         phy_conf.eee_capability = phy_ab.eee_capability;
1968         phy_conf.eeer = phy_ab.eeer_val;
1969         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
1970
1971         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
1972                     phy_ab.abilities, phy_ab.link_speed);
1973         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
1974                     phy_conf.abilities, phy_conf.link_speed);
1975
1976         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
1977         if (status)
1978                 return ret;
1979
1980         return I40E_SUCCESS;
1981 }
1982
1983 static int
1984 i40e_apply_link_speed(struct rte_eth_dev *dev)
1985 {
1986         uint8_t speed;
1987         uint8_t abilities = 0;
1988         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1989         struct rte_eth_conf *conf = &dev->data->dev_conf;
1990
1991         speed = i40e_parse_link_speeds(conf->link_speeds);
1992         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1993         if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
1994                 abilities |= I40E_AQ_PHY_AN_ENABLED;
1995         abilities |= I40E_AQ_PHY_LINK_ENABLED;
1996
1997         return i40e_phy_conf_link(hw, abilities, speed, true);
1998 }
1999
2000 static int
2001 i40e_dev_start(struct rte_eth_dev *dev)
2002 {
2003         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2004         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2005         struct i40e_vsi *main_vsi = pf->main_vsi;
2006         int ret, i;
2007         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2008         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2009         uint32_t intr_vector = 0;
2010         struct i40e_vsi *vsi;
2011
2012         hw->adapter_stopped = 0;
2013
2014         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2015                 PMD_INIT_LOG(ERR,
2016                 "Invalid link_speeds for port %u, autonegotiation disabled",
2017                               dev->data->port_id);
2018                 return -EINVAL;
2019         }
2020
2021         rte_intr_disable(intr_handle);
2022
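             /* With Rx interrupt mode requested, allocate one event fd per
              * Rx queue so each queue can be serviced by its own MSI-X
              * vector. */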
2023         if ((rte_intr_cap_multiple(intr_handle) ||
2024              !RTE_ETH_DEV_SRIOV(dev).active) &&
2025             dev->data->dev_conf.intr_conf.rxq != 0) {
2026                 intr_vector = dev->data->nb_rx_queues;
2027                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2028                 if (ret)
2029                         return ret;
2030         }
2031
2032         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2033                 intr_handle->intr_vec =
2034                         rte_zmalloc("intr_vec",
2035                                     dev->data->nb_rx_queues * sizeof(int),
2036                                     0);
2037                 if (!intr_handle->intr_vec) {
2038                         PMD_INIT_LOG(ERR,
2039                                 "Failed to allocate %d rx_queues intr_vec",
2040                                 dev->data->nb_rx_queues);
2041                         return -ENOMEM;
2042                 }
2043         }
2044
2045         /* Initialize VSI */
2046         ret = i40e_dev_rxtx_init(pf);
2047         if (ret != I40E_SUCCESS) {
2048                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2049                 goto err_up;
2050         }
2051
2052         /* Map queues with MSIX interrupt */
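             /* The main VSI uses whatever Rx queues remain after each
              * configured VMDQ pool claims RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
              * of them. */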
2053         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2054                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2055         i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2056         i40e_vsi_enable_queues_intr(main_vsi);
2057
2058         /* Map VMDQ VSI queues with MSIX interrupt */
2059         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2060                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2061                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2062                                           I40E_ITR_INDEX_DEFAULT);
2063                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2064         }
2065
2066         /* enable FDIR MSIX interrupt */
2067         if (pf->fdir.fdir_vsi) {
2068                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
2069                                           I40E_ITR_INDEX_NONE);
2070                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
2071         }
2072
2073         /* Enable all queues which have been configured */
2074         ret = i40e_dev_switch_queues(pf, TRUE);
2075         if (ret != I40E_SUCCESS) {
2076                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
2077                 goto err_up;
2078         }
2079
2080         /* Enable receiving broadcast packets */
2081         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2082         if (ret != I40E_SUCCESS)
2083                 PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
2084
2085         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2086                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2087                                                 true, NULL);
2088                 if (ret != I40E_SUCCESS)
2089                         PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
2090         }
2091
2092         /* Enable the VLAN promiscuous mode. */
2093         if (pf->vfs) {
2094                 for (i = 0; i < pf->vf_num; i++) {
2095                         vsi = pf->vfs[i].vsi;
2096                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2097                                                      true, NULL);
2098                 }
2099         }
2100
2101         /* Enable mac loopback mode */
2102         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2103             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2104                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2105                 if (ret != I40E_SUCCESS) {
2106                         PMD_DRV_LOG(ERR, "failed to set loopback mode");
2107                         goto err_up;
2108                 }
2109         }
2110
2111         /* Apply link configure */
2112         if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
2113                                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
2114                                 ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
2115                                 ETH_LINK_SPEED_40G)) {
2116                 PMD_DRV_LOG(ERR, "Invalid link setting");
2117                 goto err_up;
2118         }
2119         ret = i40e_apply_link_speed(dev);
2120         if (I40E_SUCCESS != ret) {
2121                 PMD_DRV_LOG(ERR, "Failed to apply link setting");
2122                 goto err_up;
2123         }
2124
2125         if (!rte_intr_allow_others(intr_handle)) {
2126                 rte_intr_callback_unregister(intr_handle,
2127                                              i40e_dev_interrupt_handler,
2128                                              (void *)dev);
2129                 /* configure and enable device interrupt */
2130                 i40e_pf_config_irq0(hw, FALSE);
2131                 i40e_pf_enable_irq0(hw);
2132
2133                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2134                         PMD_INIT_LOG(INFO,
2135                                 "lsc won't enable because of no intr multiplex");
2136         } else {
2137                 ret = i40e_aq_set_phy_int_mask(hw,
2138                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2139                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2140                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2141                 if (ret != I40E_SUCCESS)
2142                         PMD_DRV_LOG(WARNING, "Failed to set PHY mask");
2143
2144                 /* Call the get_link_info AQ command to enable/disable LSE */
2145                 i40e_dev_link_update(dev, 0);
2146         }
2147
2148         /* enable uio intr after callback register */
2149         rte_intr_enable(intr_handle);
2150
2151         i40e_filter_restore(pf);
2152
2153         if (pf->tm_conf.root && !pf->tm_conf.committed)
2154                 PMD_DRV_LOG(WARNING,
2155                             "please call hierarchy_commit() "
2156                             "before starting the port");
2157
2158         return I40E_SUCCESS;
2159
2160 err_up:
2161         i40e_dev_switch_queues(pf, FALSE);
2162         i40e_dev_clear_queues(dev);
2163
2164         return ret;
2165 }
2166
2167 static void
2168 i40e_dev_stop(struct rte_eth_dev *dev)
2169 {
2170         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2171         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2172         struct i40e_vsi *main_vsi = pf->main_vsi;
2173         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2174         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2175         int i;
2176
2177         if (hw->adapter_stopped == 1)
2178                 return;
2179         /* Disable all queues */
2180         i40e_dev_switch_queues(pf, FALSE);
2181
2182         /* un-map queues with interrupt registers */
2183         i40e_vsi_disable_queues_intr(main_vsi);
2184         i40e_vsi_queues_unbind_intr(main_vsi);
2185
2186         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2187                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2188                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2189         }
2190
2191         if (pf->fdir.fdir_vsi) {
2192                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2193                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2194         }
2195         /* Clear all queues and release memory */
2196         i40e_dev_clear_queues(dev);
2197
2198         /* Set link down */
2199         i40e_dev_set_link_down(dev);
2200
2201         if (!rte_intr_allow_others(intr_handle))
2202                 /* resume to the default handler */
2203                 rte_intr_callback_register(intr_handle,
2204                                            i40e_dev_interrupt_handler,
2205                                            (void *)dev);
2206
2207         /* Clean datapath event and queue/vec mapping */
2208         rte_intr_efd_disable(intr_handle);
2209         if (intr_handle->intr_vec) {
2210                 rte_free(intr_handle->intr_vec);
2211                 intr_handle->intr_vec = NULL;
2212         }
2213
2214         /* reset hierarchy commit */
2215         pf->tm_conf.committed = false;
2216
2217         hw->adapter_stopped = 1;
2218 }
2219
2220 static void
2221 i40e_dev_close(struct rte_eth_dev *dev)
2222 {
2223         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2224         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2225         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2226         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2227         struct i40e_mirror_rule *p_mirror;
2228         uint32_t reg;
2229         int i;
2230         int ret;
2231
2232         PMD_INIT_FUNC_TRACE();
2233
2234         i40e_dev_stop(dev);
2235
2236         /* Remove all mirror rules */
2237         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2238                 ret = i40e_aq_del_mirror_rule(hw,
2239                                               pf->main_vsi->veb->seid,
2240                                               p_mirror->rule_type,
2241                                               p_mirror->entries,
2242                                               p_mirror->num_entries,
2243                                               p_mirror->id);
2244                 if (ret < 0)
2245                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2246                                     "status = %d, aq_err = %d.", ret,
2247                                     hw->aq.asq_last_status);
2248
2249                 /* remove mirror software resource anyway */
2250                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2251                 rte_free(p_mirror);
2252                 pf->nb_mirror_rule--;
2253         }
2254
2255         i40e_dev_free_queues(dev);
2256
2257         /* Disable interrupt */
2258         i40e_pf_disable_irq0(hw);
2259         rte_intr_disable(intr_handle);
2260
2261         /* shutdown and destroy the HMC */
2262         i40e_shutdown_lan_hmc(hw);
2263
2264         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2265                 i40e_vsi_release(pf->vmdq[i].vsi);
2266                 pf->vmdq[i].vsi = NULL;
2267         }
2268         rte_free(pf->vmdq);
2269         pf->vmdq = NULL;
2270
2271         /* release all the existing VSIs and VEBs */
2272         i40e_fdir_teardown(pf);
2273         i40e_vsi_release(pf->main_vsi);
2274
2275         /* shutdown the adminq */
2276         i40e_aq_queue_shutdown(hw, true);
2277         i40e_shutdown_adminq(hw);
2278
2279         i40e_res_pool_destroy(&pf->qp_pool);
2280         i40e_res_pool_destroy(&pf->msix_pool);
2281
2282         /* Disable flexible payload in global configuration */
2283         if (!pf->support_multi_driver)
2284                 i40e_flex_payload_reg_set_default(hw);
2285
2286         /* force a PF reset to clean anything leftover */
2287         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2288         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2289                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2290         I40E_WRITE_FLUSH(hw);
2291 }
2292
2293 /*
2294  * Reset PF device only to re-initialize resources in PMD layer
2295  */
2296 static int
2297 i40e_dev_reset(struct rte_eth_dev *dev)
2298 {
2299         int ret;
2300
2301         /* When a DPDK PMD PF begins to reset a PF port, it should notify
2302          * all of its VFs so they stay aligned with it. The detailed notification
2303          * mechanism is PMD specific. As to i40e PF, it is rather complex.
2304          * To avoid unexpected behavior in VF, currently reset of PF with
2305          * SR-IOV activation is not supported. It might be supported later.
2306          */
2307         if (dev->data->sriov.active)
2308                 return -ENOTSUP;
2309
2310         ret = eth_i40e_dev_uninit(dev);
2311         if (ret)
2312                 return ret;
2313
2314         ret = eth_i40e_dev_init(dev);
2315
2316         return ret;
2317 }
2318
2319 static void
2320 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2321 {
2322         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2323         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2324         struct i40e_vsi *vsi = pf->main_vsi;
2325         int status;
2326
2327         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2328                                                      true, NULL, true);
2329         if (status != I40E_SUCCESS)
2330                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2331
2332         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2333                                                         TRUE, NULL);
2334         if (status != I40E_SUCCESS)
2335                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2336
2337 }
2338
2339 static void
2340 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2341 {
2342         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2343         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2344         struct i40e_vsi *vsi = pf->main_vsi;
2345         int status;
2346
2347         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2348                                                      false, NULL, true);
2349         if (status != I40E_SUCCESS)
2350                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2351
2352         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2353                                                         false, NULL);
2354         if (status != I40E_SUCCESS)
2355                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2356 }
2357
2358 static void
2359 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2360 {
2361         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2362         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2363         struct i40e_vsi *vsi = pf->main_vsi;
2364         int ret;
2365
2366         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2367         if (ret != I40E_SUCCESS)
2368                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2369 }
2370
2371 static void
2372 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2373 {
2374         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2375         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2376         struct i40e_vsi *vsi = pf->main_vsi;
2377         int ret;
2378
2379         if (dev->data->promiscuous == 1)
2380                 return; /* must remain in all_multicast mode */
2381
2382         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2383                                 vsi->seid, FALSE, NULL);
2384         if (ret != I40E_SUCCESS)
2385                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2386 }
2387
2388 /*
2389  * Set device link up.
2390  */
2391 static int
2392 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2393 {
2394         /* re-apply link speed setting */
2395         return i40e_apply_link_speed(dev);
2396 }
2397
2398 /*
2399  * Set device link down.
2400  */
2401 static int
2402 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2403 {
2404         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2405         uint8_t abilities = 0;
2406         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2407
2408         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2409         return i40e_phy_conf_link(hw, abilities, speed, false);
2410 }
2411
2412 static __rte_always_inline void
2413 update_link_no_wait(struct i40e_hw *hw, struct rte_eth_link *link)
2414 {
2415 /* Link status registers and values */
2416 #define I40E_PRTMAC_LINKSTA             0x001E2420
2417 #define I40E_REG_LINK_UP                0x40000080
2418 #define I40E_PRTMAC_MACC                0x001E24E0
2419 #define I40E_REG_MACC_25GB              0x00020000
2420 #define I40E_REG_SPEED_MASK             0x38000000
2421 #define I40E_REG_SPEED_100MB            0x00000000
2422 #define I40E_REG_SPEED_1GB              0x08000000
2423 #define I40E_REG_SPEED_10GB             0x10000000
2424 #define I40E_REG_SPEED_20GB             0x20000000
2425 #define I40E_REG_SPEED_25_40GB          0x18000000
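             /*
              * The no-wait path reads link state and speed straight from
              * the PRTMAC registers defined above instead of issuing a
              * (slower) get_link_info adminq request.
              */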
2426         uint32_t link_speed;
2427         uint32_t reg_val;
2428
2429         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2430         link_speed = reg_val & I40E_REG_SPEED_MASK;
2431         reg_val &= I40E_REG_LINK_UP;
2432         link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2433
2434         if (unlikely(link->link_status == 0))
2435                 return;
2436
2437         /* Parse the link status */
2438         switch (link_speed) {
2439         case I40E_REG_SPEED_100MB:
2440                 link->link_speed = ETH_SPEED_NUM_100M;
2441                 break;
2442         case I40E_REG_SPEED_1GB:
2443                 link->link_speed = ETH_SPEED_NUM_1G;
2444                 break;
2445         case I40E_REG_SPEED_10GB:
2446                 link->link_speed = ETH_SPEED_NUM_10G;
2447                 break;
2448         case I40E_REG_SPEED_20GB:
2449                 link->link_speed = ETH_SPEED_NUM_20G;
2450                 break;
2451         case I40E_REG_SPEED_25_40GB:
2452                 reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2453
2454                 if (reg_val & I40E_REG_MACC_25GB)
2455                         link->link_speed = ETH_SPEED_NUM_25G;
2456                 else
2457                         link->link_speed = ETH_SPEED_NUM_40G;
2458
2459                 break;
2460         default:
2461                 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2462                 break;
2463         }
2464 }
2465
2466 static __rte_always_inline void
2467 update_link_wait(struct i40e_hw *hw, struct rte_eth_link *link,
2468         bool enable_lse)
2469 {
2470 #define CHECK_INTERVAL             100  /* 100ms */
2471 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
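             /*
              * Poll the adminq for link info up to MAX_REPEAT_TIME times
              * (~1s total), stopping as soon as the link reports up.
              */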
2472         uint32_t rep_cnt = MAX_REPEAT_TIME;
2473         struct i40e_link_status link_status;
2474         int status;
2475
2476         memset(&link_status, 0, sizeof(link_status));
2477
2478         do {
2479                 memset(&link_status, 0, sizeof(link_status));
2480
2481                 /* Get link status information from hardware */
2482                 status = i40e_aq_get_link_info(hw, enable_lse,
2483                                                 &link_status, NULL);
2484                 if (unlikely(status != I40E_SUCCESS)) {
2485                         link->link_speed = ETH_SPEED_NUM_100M;
2486                         link->link_duplex = ETH_LINK_FULL_DUPLEX;
2487                         PMD_DRV_LOG(ERR, "Failed to get link info");
2488                         return;
2489                 }
2490
2491                 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2492                 if (unlikely(link->link_status != 0))
2493                         break;
2494
2495                 rte_delay_ms(CHECK_INTERVAL);
2496         } while (--rep_cnt);
2497
2498         /* Parse the link status */
2499         switch (link_status.link_speed) {
2500         case I40E_LINK_SPEED_100MB:
2501                 link->link_speed = ETH_SPEED_NUM_100M;
2502                 break;
2503         case I40E_LINK_SPEED_1GB:
2504                 link->link_speed = ETH_SPEED_NUM_1G;
2505                 break;
2506         case I40E_LINK_SPEED_10GB:
2507                 link->link_speed = ETH_SPEED_NUM_10G;
2508                 break;
2509         case I40E_LINK_SPEED_20GB:
2510                 link->link_speed = ETH_SPEED_NUM_20G;
2511                 break;
2512         case I40E_LINK_SPEED_25GB:
2513                 link->link_speed = ETH_SPEED_NUM_25G;
2514                 break;
2515         case I40E_LINK_SPEED_40GB:
2516                 link->link_speed = ETH_SPEED_NUM_40G;
2517                 break;
2518         default:
2519                 link->link_speed = ETH_SPEED_NUM_100M;
2520                 break;
2521         }
2522 }
2523
2524 int
2525 i40e_dev_link_update(struct rte_eth_dev *dev,
2526                      int wait_to_complete)
2527 {
2528         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2529         struct rte_eth_link link;
2530         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2531         int ret;
2532
2533         memset(&link, 0, sizeof(link));
2534
2535         /* i40e uses full duplex only */
2536         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2537         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2538                         ETH_LINK_SPEED_FIXED);
2539
2540         if (!wait_to_complete)
2541                 update_link_no_wait(hw, &link);
2542         else
2543                 update_link_wait(hw, &link, enable_lse);
2544
2545         ret = rte_eth_linkstatus_set(dev, &link);
2546         i40e_notify_all_vfs_link_status(dev);
2547
2548         return ret;
2549 }
2550
2551 /* Get all the statistics of a VSI */
2552 void
2553 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2554 {
2555         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2556         struct i40e_eth_stats *nes = &vsi->eth_stats;
2557         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2558         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
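             /*
              * A sketch of what the stat_update helpers do (per the common
              * i40e PMD implementation): when offset_loaded is false they
              * seed *offset with the raw counter; afterwards
              *   *stat = (counter - *offset) masked to 48 (or 32) bits
              * so hardware counter wrap-around is absorbed.
              */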
2559
2560         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2561                             vsi->offset_loaded, &oes->rx_bytes,
2562                             &nes->rx_bytes);
2563         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2564                             vsi->offset_loaded, &oes->rx_unicast,
2565                             &nes->rx_unicast);
2566         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2567                             vsi->offset_loaded, &oes->rx_multicast,
2568                             &nes->rx_multicast);
2569         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2570                             vsi->offset_loaded, &oes->rx_broadcast,
2571                             &nes->rx_broadcast);
2572         /* exclude CRC bytes */
2573         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2574                 nes->rx_broadcast) * ETHER_CRC_LEN;
2575
2576         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2577                             &oes->rx_discards, &nes->rx_discards);
2578         /* GLV_REPC not supported */
2579         /* GLV_RMPC not supported */
2580         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2581                             &oes->rx_unknown_protocol,
2582                             &nes->rx_unknown_protocol);
2583         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2584                             vsi->offset_loaded, &oes->tx_bytes,
2585                             &nes->tx_bytes);
2586         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2587                             vsi->offset_loaded, &oes->tx_unicast,
2588                             &nes->tx_unicast);
2589         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2590                             vsi->offset_loaded, &oes->tx_multicast,
2591                             &nes->tx_multicast);
2592         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2593                             vsi->offset_loaded,  &oes->tx_broadcast,
2594                             &nes->tx_broadcast);
2595         /* GLV_TDPC not supported */
2596         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2597                             &oes->tx_errors, &nes->tx_errors);
2598         vsi->offset_loaded = true;
2599
2600         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2601                     vsi->vsi_id);
2602         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2603         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2604         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2605         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2606         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2607         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2608                     nes->rx_unknown_protocol);
2609         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2610         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2611         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2612         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2613         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2614         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2615         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2616                     vsi->vsi_id);
2617 }
2618
2619 static void
2620 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2621 {
2622         unsigned int i;
2623         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2624         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2625
2626         /* Get rx/tx bytes of internal transfer packets */
2627         i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2628                         I40E_GLV_GORCL(hw->port),
2629                         pf->offset_loaded,
2630                         &pf->internal_stats_offset.rx_bytes,
2631                         &pf->internal_stats.rx_bytes);
2632
2633         i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2634                         I40E_GLV_GOTCL(hw->port),
2635                         pf->offset_loaded,
2636                         &pf->internal_stats_offset.tx_bytes,
2637                         &pf->internal_stats.tx_bytes);
2638         /* Get total internal rx packet count */
2639         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
2640                             I40E_GLV_UPRCL(hw->port),
2641                             pf->offset_loaded,
2642                             &pf->internal_stats_offset.rx_unicast,
2643                             &pf->internal_stats.rx_unicast);
2644         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
2645                             I40E_GLV_MPRCL(hw->port),
2646                             pf->offset_loaded,
2647                             &pf->internal_stats_offset.rx_multicast,
2648                             &pf->internal_stats.rx_multicast);
2649         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
2650                             I40E_GLV_BPRCL(hw->port),
2651                             pf->offset_loaded,
2652                             &pf->internal_stats_offset.rx_broadcast,
2653                             &pf->internal_stats.rx_broadcast);
2654         /* Get total internal tx packet count */
2655         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
2656                             I40E_GLV_UPTCL(hw->port),
2657                             pf->offset_loaded,
2658                             &pf->internal_stats_offset.tx_unicast,
2659                             &pf->internal_stats.tx_unicast);
2660         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
2661                             I40E_GLV_MPTCL(hw->port),
2662                             pf->offset_loaded,
2663                             &pf->internal_stats_offset.tx_multicast,
2664                             &pf->internal_stats.tx_multicast);
2665         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
2666                             I40E_GLV_BPTCL(hw->port),
2667                             pf->offset_loaded,
2668                             &pf->internal_stats_offset.tx_broadcast,
2669                             &pf->internal_stats.tx_broadcast);
2670
2671         /* exclude CRC size */
2672         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
2673                 pf->internal_stats.rx_multicast +
2674                 pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
2675
2676         /* Get statistics of struct i40e_eth_stats */
2677         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2678                             I40E_GLPRT_GORCL(hw->port),
2679                             pf->offset_loaded, &os->eth.rx_bytes,
2680                             &ns->eth.rx_bytes);
2681         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2682                             I40E_GLPRT_UPRCL(hw->port),
2683                             pf->offset_loaded, &os->eth.rx_unicast,
2684                             &ns->eth.rx_unicast);
2685         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2686                             I40E_GLPRT_MPRCL(hw->port),
2687                             pf->offset_loaded, &os->eth.rx_multicast,
2688                             &ns->eth.rx_multicast);
2689         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2690                             I40E_GLPRT_BPRCL(hw->port),
2691                             pf->offset_loaded, &os->eth.rx_broadcast,
2692                             &ns->eth.rx_broadcast);
2693         /* Workaround: CRC size should not be included in byte statistics,
2694          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2695          */
2696         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2697                 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2698
2699         /* Exclude internal rx bytes.
2700          * Workaround: I40E_GLV_GORC[H/L] may be updated before
2701          * I40E_GLPRT_GORC[H/L], so there is a small window in which the
2702          * subtraction below could yield a negative value. The same applies
2703          * to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
2704          */
2705         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
2706                 ns->eth.rx_bytes = 0;
2707         else
2708                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
2709
2710         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
2711                 ns->eth.rx_unicast = 0;
2712         else
2713                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
2714
2715         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
2716                 ns->eth.rx_multicast = 0;
2717         else
2718                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
2719
2720         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
2721                 ns->eth.rx_broadcast = 0;
2722         else
2723                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
2724
2725         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2726                             pf->offset_loaded, &os->eth.rx_discards,
2727                             &ns->eth.rx_discards);
2728         /* GLPRT_REPC not supported */
2729         /* GLPRT_RMPC not supported */
2730         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2731                             pf->offset_loaded,
2732                             &os->eth.rx_unknown_protocol,
2733                             &ns->eth.rx_unknown_protocol);
2734         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2735                             I40E_GLPRT_GOTCL(hw->port),
2736                             pf->offset_loaded, &os->eth.tx_bytes,
2737                             &ns->eth.tx_bytes);
2738         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2739                             I40E_GLPRT_UPTCL(hw->port),
2740                             pf->offset_loaded, &os->eth.tx_unicast,
2741                             &ns->eth.tx_unicast);
2742         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2743                             I40E_GLPRT_MPTCL(hw->port),
2744                             pf->offset_loaded, &os->eth.tx_multicast,
2745                             &ns->eth.tx_multicast);
2746         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2747                             I40E_GLPRT_BPTCL(hw->port),
2748                             pf->offset_loaded, &os->eth.tx_broadcast,
2749                             &ns->eth.tx_broadcast);
2750         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2751                 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2752
2753         /* Exclude internal tx bytes.
2754          * Workaround: I40E_GLV_GOTC[H/L] may be updated before
2755          * I40E_GLPRT_GOTC[H/L], so there is a small window in which the
2756          * subtraction below could yield a negative value. The same applies
2757          * to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
2758          */
2759         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
2760                 ns->eth.tx_bytes = 0;
2761         else
2762                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
2763
2764         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
2765                 ns->eth.tx_unicast = 0;
2766         else
2767                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
2768
2769         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
2770                 ns->eth.tx_multicast = 0;
2771         else
2772                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
2773
2774         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
2775                 ns->eth.tx_broadcast = 0;
2776         else
2777                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
2778
2779         /* GLPRT_TEPC not supported */
2780
2781         /* additional port specific stats */
2782         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2783                             pf->offset_loaded, &os->tx_dropped_link_down,
2784                             &ns->tx_dropped_link_down);
2785         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2786                             pf->offset_loaded, &os->crc_errors,
2787                             &ns->crc_errors);
2788         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2789                             pf->offset_loaded, &os->illegal_bytes,
2790                             &ns->illegal_bytes);
2791         /* GLPRT_ERRBC not supported */
2792         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2793                             pf->offset_loaded, &os->mac_local_faults,
2794                             &ns->mac_local_faults);
2795         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2796                             pf->offset_loaded, &os->mac_remote_faults,
2797                             &ns->mac_remote_faults);
2798         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2799                             pf->offset_loaded, &os->rx_length_errors,
2800                             &ns->rx_length_errors);
2801         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2802                             pf->offset_loaded, &os->link_xon_rx,
2803                             &ns->link_xon_rx);
2804         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2805                             pf->offset_loaded, &os->link_xoff_rx,
2806                             &ns->link_xoff_rx);
2807         for (i = 0; i < 8; i++) {
2808                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2809                                     pf->offset_loaded,
2810                                     &os->priority_xon_rx[i],
2811                                     &ns->priority_xon_rx[i]);
2812                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2813                                     pf->offset_loaded,
2814                                     &os->priority_xoff_rx[i],
2815                                     &ns->priority_xoff_rx[i]);
2816         }
2817         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2818                             pf->offset_loaded, &os->link_xon_tx,
2819                             &ns->link_xon_tx);
2820         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2821                             pf->offset_loaded, &os->link_xoff_tx,
2822                             &ns->link_xoff_tx);
2823         for (i = 0; i < 8; i++) {
2824                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2825                                     pf->offset_loaded,
2826                                     &os->priority_xon_tx[i],
2827                                     &ns->priority_xon_tx[i]);
2828                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2829                                     pf->offset_loaded,
2830                                     &os->priority_xoff_tx[i],
2831                                     &ns->priority_xoff_tx[i]);
2832                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2833                                     pf->offset_loaded,
2834                                     &os->priority_xon_2_xoff[i],
2835                                     &ns->priority_xon_2_xoff[i]);
2836         }
2837         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2838                             I40E_GLPRT_PRC64L(hw->port),
2839                             pf->offset_loaded, &os->rx_size_64,
2840                             &ns->rx_size_64);
2841         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2842                             I40E_GLPRT_PRC127L(hw->port),
2843                             pf->offset_loaded, &os->rx_size_127,
2844                             &ns->rx_size_127);
2845         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2846                             I40E_GLPRT_PRC255L(hw->port),
2847                             pf->offset_loaded, &os->rx_size_255,
2848                             &ns->rx_size_255);
2849         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2850                             I40E_GLPRT_PRC511L(hw->port),
2851                             pf->offset_loaded, &os->rx_size_511,
2852                             &ns->rx_size_511);
2853         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2854                             I40E_GLPRT_PRC1023L(hw->port),
2855                             pf->offset_loaded, &os->rx_size_1023,
2856                             &ns->rx_size_1023);
2857         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2858                             I40E_GLPRT_PRC1522L(hw->port),
2859                             pf->offset_loaded, &os->rx_size_1522,
2860                             &ns->rx_size_1522);
2861         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2862                             I40E_GLPRT_PRC9522L(hw->port),
2863                             pf->offset_loaded, &os->rx_size_big,
2864                             &ns->rx_size_big);
2865         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2866                             pf->offset_loaded, &os->rx_undersize,
2867                             &ns->rx_undersize);
2868         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2869                             pf->offset_loaded, &os->rx_fragments,
2870                             &ns->rx_fragments);
2871         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2872                             pf->offset_loaded, &os->rx_oversize,
2873                             &ns->rx_oversize);
2874         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2875                             pf->offset_loaded, &os->rx_jabber,
2876                             &ns->rx_jabber);
2877         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2878                             I40E_GLPRT_PTC64L(hw->port),
2879                             pf->offset_loaded, &os->tx_size_64,
2880                             &ns->tx_size_64);
2881         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2882                             I40E_GLPRT_PTC127L(hw->port),
2883                             pf->offset_loaded, &os->tx_size_127,
2884                             &ns->tx_size_127);
2885         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2886                             I40E_GLPRT_PTC255L(hw->port),
2887                             pf->offset_loaded, &os->tx_size_255,
2888                             &ns->tx_size_255);
2889         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2890                             I40E_GLPRT_PTC511L(hw->port),
2891                             pf->offset_loaded, &os->tx_size_511,
2892                             &ns->tx_size_511);
2893         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2894                             I40E_GLPRT_PTC1023L(hw->port),
2895                             pf->offset_loaded, &os->tx_size_1023,
2896                             &ns->tx_size_1023);
2897         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2898                             I40E_GLPRT_PTC1522L(hw->port),
2899                             pf->offset_loaded, &os->tx_size_1522,
2900                             &ns->tx_size_1522);
2901         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2902                             I40E_GLPRT_PTC9522L(hw->port),
2903                             pf->offset_loaded, &os->tx_size_big,
2904                             &ns->tx_size_big);
2905         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2906                            pf->offset_loaded,
2907                            &os->fd_sb_match, &ns->fd_sb_match);
2908         /* GLPRT_MSPDC not supported */
2909         /* GLPRT_XEC not supported */
2910
2911         pf->offset_loaded = true;
2912
2913         if (pf->main_vsi)
2914                 i40e_update_vsi_stats(pf->main_vsi);
2915 }
2916
2917 /* Get all statistics of a port */
2918 static int
2919 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2920 {
2921         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2922         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2923         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2924         unsigned i;
2925
2926         /* Read the stats registers to refresh pf->stats, then fill the rte_eth_stats struct */
2927         i40e_read_stats_registers(pf, hw);
2928
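        /* ipackets counts only the packets actually received; the packets
         * the hardware dropped are reported separately in imissed below.
         */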
2929         stats->ipackets = ns->eth.rx_unicast +
2930                         ns->eth.rx_multicast +
2931                         ns->eth.rx_broadcast -
2932                         ns->eth.rx_discards -
2933                         pf->main_vsi->eth_stats.rx_discards;
2934         stats->opackets = ns->eth.tx_unicast +
2935                         ns->eth.tx_multicast +
2936                         ns->eth.tx_broadcast;
2937         stats->ibytes   = ns->eth.rx_bytes;
2938         stats->obytes   = ns->eth.tx_bytes;
2939         stats->oerrors  = ns->eth.tx_errors +
2940                         pf->main_vsi->eth_stats.tx_errors;
2941
2942         /* Rx Errors */
2943         stats->imissed  = ns->eth.rx_discards +
2944                         pf->main_vsi->eth_stats.rx_discards;
2945         stats->ierrors  = ns->crc_errors +
2946                         ns->rx_length_errors + ns->rx_undersize +
2947                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
2948
2949         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2950         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
2951         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
2952         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
2953         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
2954         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
2955         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2956                     ns->eth.rx_unknown_protocol);
2957         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
2958         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
2959         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
2960         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
2961         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
2962         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
2963
2964         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
2965                     ns->tx_dropped_link_down);
2966         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
2967         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
2968                     ns->illegal_bytes);
2969         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
2970         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
2971                     ns->mac_local_faults);
2972         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
2973                     ns->mac_remote_faults);
2974         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
2975                     ns->rx_length_errors);
2976         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
2977         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
2978         for (i = 0; i < 8; i++) {
2979                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
2980                                 i, ns->priority_xon_rx[i]);
2981                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
2982                                 i, ns->priority_xoff_rx[i]);
2983         }
2984         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
2985         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
2986         for (i = 0; i < 8; i++) {
2987                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
2988                                 i, ns->priority_xon_tx[i]);
2989                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
2990                                 i, ns->priority_xoff_tx[i]);
2991                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
2992                                 i, ns->priority_xon_2_xoff[i]);
2993         }
2994         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
2995         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
2996         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
2997         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
2998         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
2999         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3000         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3001         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3002         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3003         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3004         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3005         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3006         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3007         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3008         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3009         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3010         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3011         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3012         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3013                         ns->mac_short_packet_dropped);
3014         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3015                     ns->checksum_error);
3016         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3017         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3018         return 0;
3019 }
3020
3021 /* Reset the statistics */
3022 static void
3023 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3024 {
3025         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3026         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3027
3028         /* Mark PF and VSI stats to update the offset, aka "reset" */
3029         pf->offset_loaded = false;
3030         if (pf->main_vsi)
3031                 pf->main_vsi->offset_loaded = false;
3032
3033         /* Read the stats; with offset_loaded false, the current register values become the new offsets */
3034         i40e_read_stats_registers(pf, hw);
3035 }
3036
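/* Total number of extended stats: the per-port eth stats and HW port
 * stats, plus one RX and one TX priority-stat set for each of the eight
 * traffic classes.
 */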
3037 static uint32_t
3038 i40e_xstats_calc_num(void)
3039 {
3040         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3041                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3042                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3043 }
3044
3045 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3046                                      struct rte_eth_xstat_name *xstats_names,
3047                                      __rte_unused unsigned limit)
3048 {
3049         unsigned count = 0;
3050         unsigned i, prio;
3051
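        /* Per the ethdev convention, a NULL xstats_names means "report
         * only the number of entries required".
         */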
3052         if (xstats_names == NULL)
3053                 return i40e_xstats_calc_num();
3054
3055         /* Note: limit checked in rte_eth_xstats_get_names() */
3056
3057         /* Get stats from i40e_eth_stats struct */
3058         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3059                 snprintf(xstats_names[count].name,
3060                          sizeof(xstats_names[count].name),
3061                          "%s", rte_i40e_stats_strings[i].name);
3062                 count++;
3063         }
3064
3065         /* Get individual stats from the i40e_hw_port struct */
3066         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3067                 snprintf(xstats_names[count].name,
3068                         sizeof(xstats_names[count].name),
3069                          "%s", rte_i40e_hw_port_strings[i].name);
3070                 count++;
3071         }
3072
3073         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3074                 for (prio = 0; prio < 8; prio++) {
3075                         snprintf(xstats_names[count].name,
3076                                  sizeof(xstats_names[count].name),
3077                                  "rx_priority%u_%s", prio,
3078                                  rte_i40e_rxq_prio_strings[i].name);
3079                         count++;
3080                 }
3081         }
3082
3083         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3084                 for (prio = 0; prio < 8; prio++) {
3085                         snprintf(xstats_names[count].name,
3086                                  sizeof(xstats_names[count].name),
3087                                  "tx_priority%u_%s", prio,
3088                                  rte_i40e_txq_prio_strings[i].name);
3089                         count++;
3090                 }
3091         }
3092         return count;
3093 }
3094
3095 static int
3096 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3097                     unsigned n)
3098 {
3099         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3100         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3101         unsigned i, count, prio;
3102         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3103
3104         count = i40e_xstats_calc_num();
3105         if (n < count)
3106                 return count;
3107
3108         i40e_read_stats_registers(pf, hw);
3109
3110         if (xstats == NULL)
3111                 return 0;
3112
3113         count = 0;
3114
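        /* Each *_strings[] table pairs an xstat name with the byte offset
         * of its counter inside the stats struct, so the values below are
         * fetched generically with pointer arithmetic.
         */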
3115         /* Get stats from i40e_eth_stats struct */
3116         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3117                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3118                         rte_i40e_stats_strings[i].offset);
3119                 xstats[count].id = count;
3120                 count++;
3121         }
3122
3123         /* Get individual stats from the i40e_hw_port struct */
3124         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3125                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3126                         rte_i40e_hw_port_strings[i].offset);
3127                 xstats[count].id = count;
3128                 count++;
3129         }
3130
3131         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3132                 for (prio = 0; prio < 8; prio++) {
3133                         xstats[count].value =
3134                                 *(uint64_t *)(((char *)hw_stats) +
3135                                 rte_i40e_rxq_prio_strings[i].offset +
3136                                 (sizeof(uint64_t) * prio));
3137                         xstats[count].id = count;
3138                         count++;
3139                 }
3140         }
3141
3142         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3143                 for (prio = 0; prio < 8; prio++) {
3144                         xstats[count].value =
3145                                 *(uint64_t *)(((char *)hw_stats) +
3146                                 rte_i40e_txq_prio_strings[i].offset +
3147                                 (sizeof(uint64_t) * prio));
3148                         xstats[count].id = count;
3149                         count++;
3150                 }
3151         }
3152
3153         return count;
3154 }
3155
3156 static int
3157 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
3158                                  __rte_unused uint16_t queue_id,
3159                                  __rte_unused uint8_t stat_idx,
3160                                  __rte_unused uint8_t is_rx)
3161 {
3162         PMD_INIT_FUNC_TRACE();
3163
3164         return -ENOSYS;
3165 }
3166
3167 static int
3168 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3169 {
3170         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3171         u32 full_ver;
3172         u8 ver, patch;
3173         u16 build;
3174         int ret;
3175
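        /* hw->nvm.oem_ver layout: bits 31:24 version, 23:8 build, 7:0 patch */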
3176         full_ver = hw->nvm.oem_ver;
3177         ver = (u8)(full_ver >> 24);
3178         build = (u16)((full_ver >> 8) & 0xffff);
3179         patch = (u8)(full_ver & 0xff);
3180
3181         ret = snprintf(fw_version, fw_size,
3182                  "%d.%d%d 0x%08x %d.%d.%d",
3183                  ((hw->nvm.version >> 12) & 0xf),
3184                  ((hw->nvm.version >> 4) & 0xff),
3185                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3186                  ver, build, patch);
3187
3188         ret += 1; /* add the size of '\0' */
3189         if (fw_size < (u32)ret)
3190                 return ret;
3191         else
3192                 return 0;
3193 }
3194
3195 static void
3196 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3197 {
3198         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3199         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3200         struct i40e_vsi *vsi = pf->main_vsi;
3201         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3202
3203         dev_info->pci_dev = pci_dev;
3204         dev_info->max_rx_queues = vsi->nb_qps;
3205         dev_info->max_tx_queues = vsi->nb_qps;
3206         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3207         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3208         dev_info->max_mac_addrs = vsi->max_macaddrs;
3209         dev_info->max_vfs = pci_dev->max_vfs;
3210         dev_info->rx_offload_capa =
3211                 DEV_RX_OFFLOAD_VLAN_STRIP |
3212                 DEV_RX_OFFLOAD_QINQ_STRIP |
3213                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3214                 DEV_RX_OFFLOAD_UDP_CKSUM |
3215                 DEV_RX_OFFLOAD_TCP_CKSUM |
3216                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3217                 DEV_RX_OFFLOAD_CRC_STRIP;
3218         dev_info->tx_offload_capa =
3219                 DEV_TX_OFFLOAD_VLAN_INSERT |
3220                 DEV_TX_OFFLOAD_QINQ_INSERT |
3221                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3222                 DEV_TX_OFFLOAD_UDP_CKSUM |
3223                 DEV_TX_OFFLOAD_TCP_CKSUM |
3224                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3225                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3226                 DEV_TX_OFFLOAD_TCP_TSO |
3227                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3228                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3229                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3230                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
3231         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3232                                                 sizeof(uint32_t);
3233         dev_info->reta_size = pf->hash_lut_size;
3234         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3235
3236         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3237                 .rx_thresh = {
3238                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3239                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3240                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3241                 },
3242                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3243                 .rx_drop_en = 0,
3244         };
3245
3246         dev_info->default_txconf = (struct rte_eth_txconf) {
3247                 .tx_thresh = {
3248                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3249                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3250                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3251                 },
3252                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3253                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3254                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3255                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3256         };
3257
3258         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3259                 .nb_max = I40E_MAX_RING_DESC,
3260                 .nb_min = I40E_MIN_RING_DESC,
3261                 .nb_align = I40E_ALIGN_RING_DESC,
3262         };
3263
3264         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3265                 .nb_max = I40E_MAX_RING_DESC,
3266                 .nb_min = I40E_MIN_RING_DESC,
3267                 .nb_align = I40E_ALIGN_RING_DESC,
3268                 .nb_seg_max = I40E_TX_MAX_SEG,
3269                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3270         };
3271
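        /* VMDQ queues are exposed after the PF's own queues, so the pool
         * queue range starts at the current max_rx_queues.
         */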
3272         if (pf->flags & I40E_FLAG_VMDQ) {
3273                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3274                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3275                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3276                                                 pf->max_nb_vmdq_vsi;
3277                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3278                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3279                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3280         }
3281
3282         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
3283                 /* For XL710 */
3284                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3285         else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
3286                 /* For XXV710 */
3287                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3288         else
3289                 /* For X710 */
3290                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3291 }
3292
3293 static int
3294 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3295 {
3296         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3297         struct i40e_vsi *vsi = pf->main_vsi;
3298         PMD_INIT_FUNC_TRACE();
3299
3300         if (on)
3301                 return i40e_vsi_add_vlan(vsi, vlan_id);
3302         else
3303                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3304 }
3305
3306 static int
3307 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3308                                 enum rte_vlan_type vlan_type,
3309                                 uint16_t tpid, int qinq)
3310 {
3311         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3312         uint64_t reg_r = 0;
3313         uint64_t reg_w = 0;
3314         uint16_t reg_id = 3;
3315         int ret;
3316
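        /* L2TAGCTRL[3] holds the ethertype of the single (or inner) VLAN
         * tag; with QinQ enabled, the outer tag ethertype lives in
         * L2TAGCTRL[2] instead.
         */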
3317         if (qinq) {
3318                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3319                         reg_id = 2;
3320         }
3321
3322         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3323                                           &reg_r, NULL);
3324         if (ret != I40E_SUCCESS) {
3325                 PMD_DRV_LOG(ERR,
3326                            "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3327                            reg_id);
3328                 return -EIO;
3329         }
3330         PMD_DRV_LOG(DEBUG,
3331                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3332                     reg_id, reg_r);
3333
3334         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3335         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3336         if (reg_r == reg_w) {
3337                 PMD_DRV_LOG(DEBUG, "No need to write");
3338                 return 0;
3339         }
3340
3341         ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3342                                            reg_w, NULL);
3343         if (ret != I40E_SUCCESS) {
3344                 PMD_DRV_LOG(ERR,
3345                             "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3346                             reg_id);
3347                 return -EIO;
3348         }
3349         PMD_DRV_LOG(DEBUG,
3350                     "Global register 0x%08x is changed with value 0x%08x",
3351                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3352
3353         return 0;
3354 }
3355
3356 static int
3357 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3358                    enum rte_vlan_type vlan_type,
3359                    uint16_t tpid)
3360 {
3361         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3362         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3363         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
3364         int ret = 0;
3365
3366         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3367              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3368             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3369                 PMD_DRV_LOG(ERR,
3370                             "Unsupported vlan type.");
3371                 return -EINVAL;
3372         }
3373
3374         if (pf->support_multi_driver) {
3375                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3376                 return -ENOTSUP;
3377         }
3378
3379         /* The ability to handle 802.1ad frames was added in NVM API 1.7 */
3380         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3381                 if (qinq) {
3382                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3383                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3384                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3385                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3386                 } else {
3387                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3388                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3389                 }
3390                 ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
3391                 if (ret != I40E_SUCCESS) {
3392                         PMD_DRV_LOG(ERR,
3393                                     "Set switch config failed aq_err: %d",
3394                                     hw->aq.asq_last_status);
3395                         ret = -EIO;
3396                 }
3397         } else
3398                 /* If NVM API < 1.7, fall back to setting the TPID via registers */
3399                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3400                                                       tpid, qinq);
3401         i40e_global_cfg_warning(I40E_WARNING_TPID);
3402
3403         return ret;
3404 }
3405
3406 static int
3407 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3408 {
3409         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3410         struct i40e_vsi *vsi = pf->main_vsi;
3411
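        /* 'mask' flags which VLAN offload settings changed; only those are
         * (re)applied from the current rxmode configuration.
         */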
3412         if (mask & ETH_VLAN_FILTER_MASK) {
3413                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3414                         i40e_vsi_config_vlan_filter(vsi, TRUE);
3415                 else
3416                         i40e_vsi_config_vlan_filter(vsi, FALSE);
3417         }
3418
3419         if (mask & ETH_VLAN_STRIP_MASK) {
3420                 /* Enable or disable VLAN stripping */
3421                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
3422                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
3423                 else
3424                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
3425         }
3426
3427         if (mask & ETH_VLAN_EXTEND_MASK) {
3428                 if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
3429                         i40e_vsi_config_double_vlan(vsi, TRUE);
3430                         /* Set global registers with default ethertype. */
3431                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3432                                            ETHER_TYPE_VLAN);
3433                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3434                                            ETHER_TYPE_VLAN);
3435                 } else
3436                         i40e_vsi_config_double_vlan(vsi, FALSE);
3438         }
3439
3440         return 0;
3441 }
3442
3443 static void
3444 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3445                           __rte_unused uint16_t queue,
3446                           __rte_unused int on)
3447 {
3448         PMD_INIT_FUNC_TRACE();
3449 }
3450
3451 static int
3452 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3453 {
3454         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3455         struct i40e_vsi *vsi = pf->main_vsi;
3456         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3457         struct i40e_vsi_vlan_pvid_info info;
3458
3459         memset(&info, 0, sizeof(info));
3460         info.on = on;
3461         if (info.on)
3462                 info.config.pvid = pvid;
3463         else {
3464                 info.config.reject.tagged =
3465                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
3466                 info.config.reject.untagged =
3467                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
3468         }
3469
3470         return i40e_vsi_vlan_pvid_set(vsi, &info);
3471 }
3472
3473 static int
3474 i40e_dev_led_on(struct rte_eth_dev *dev)
3475 {
3476         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3477         uint32_t mode = i40e_led_get(hw);
3478
3479         if (mode == 0)
3480                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
3481
3482         return 0;
3483 }
3484
3485 static int
3486 i40e_dev_led_off(struct rte_eth_dev *dev)
3487 {
3488         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3489         uint32_t mode = i40e_led_get(hw);
3490
3491         if (mode != 0)
3492                 i40e_led_set(hw, 0, false);
3493
3494         return 0;
3495 }
3496
3497 static int
3498 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3499 {
3500         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3501         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3502
3503         fc_conf->pause_time = pf->fc_conf.pause_time;
3504
3505         /* Read from the registers, in case the values were modified by another port */
3506         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
3507                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
3508         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
3509                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
3510
3511         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3512         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3513
3514         /* Return the current mode according to the actual setting */
3515         switch (hw->fc.current_mode) {
3516         case I40E_FC_FULL:
3517                 fc_conf->mode = RTE_FC_FULL;
3518                 break;
3519         case I40E_FC_TX_PAUSE:
3520                 fc_conf->mode = RTE_FC_TX_PAUSE;
3521                 break;
3522         case I40E_FC_RX_PAUSE:
3523                 fc_conf->mode = RTE_FC_RX_PAUSE;
3524                 break;
3525         case I40E_FC_NONE:
3526         default:
3527                 fc_conf->mode = RTE_FC_NONE;
3528         }
3529
3530         return 0;
3531 }
3532
3533 static int
3534 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3535 {
3536         uint32_t mflcn_reg, fctrl_reg, reg;
3537         uint32_t max_high_water;
3538         uint8_t i, aq_failure;
3539         int err;
3540         struct i40e_hw *hw;
3541         struct i40e_pf *pf;
3542         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3543                 [RTE_FC_NONE] = I40E_FC_NONE,
3544                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3545                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3546                 [RTE_FC_FULL] = I40E_FC_FULL
3547         };
3548
3549         /* The high_water field in rte_eth_fc_conf uses kilobyte units */
3550
3551         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3552         if ((fc_conf->high_water > max_high_water) ||
3553                         (fc_conf->high_water < fc_conf->low_water)) {
3554                 PMD_INIT_LOG(ERR,
3555                         "Invalid high/low water setup value in KB, High_water must be <= %d.",
3556                         max_high_water);
3557                 return -EINVAL;
3558         }
3559
3560         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3561         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3562         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3563
3564         pf->fc_conf.pause_time = fc_conf->pause_time;
3565         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3566         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3567
3568         PMD_INIT_FUNC_TRACE();
3569
3570         /* All the link-flow-control-related enable/disable register
3571          * configuration is handled by the F/W.
3572          */
3573         err = i40e_set_fc(hw, &aq_failure, true);
3574         if (err < 0)
3575                 return -ENOSYS;
3576
3577         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3578                 /* Configure flow control refresh threshold,
3579                  * the value for stat_tx_pause_refresh_timer[8]
3580                  * is used for global pause operation.
3581                  */
3582
3583                 I40E_WRITE_REG(hw,
3584                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3585                                pf->fc_conf.pause_time);
3586
3587                 /* Configure the timer value included in transmitted
3588                  * pause frames.
3589                  * The value for stat_tx_pause_quanta[8] is used for
3590                  * the global pause operation.
3591                  */
3592                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3593                                pf->fc_conf.pause_time);
3594
3595                 fctrl_reg = I40E_READ_REG(hw,
3596                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3597
3598                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3599                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3600                 else
3601                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3602
3603                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3604                                fctrl_reg);
3605         } else {
3606                 /* Configure pause time (2 TCs per register) */
3607                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3608                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3609                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3610
3611                 /* Configure flow control refresh threshold value */
3612                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3613                                pf->fc_conf.pause_time / 2);
3614
3615                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3616
3617                 /* Set or clear the MFLCN.PMCF & MFLCN.DPF bits
3618                  * depending on the configuration.
3619                  */
3620                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
3621                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3622                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3623                 } else {
3624                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3625                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3626                 }
3627
3628                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3629         }
3630
3631         if (!pf->support_multi_driver) {
3632                 /* Configure the watermarks based on both packets and bytes */
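                /* PHW/PLW expect packet counts, so the KB watermark is
                 * scaled by the 128-byte average packet size; GHW/GLW
                 * take plain byte values.
                 */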
3633                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
3634                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3635                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3636                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
3637                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3638                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3639                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
3640                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3641                                   << I40E_KILOSHIFT);
3642                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
3643                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3644                                    << I40E_KILOSHIFT);
3645                 i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
3646         } else {
3647                 PMD_DRV_LOG(ERR,
3648                             "Water marker configuration is not supported.");
3649         }
3650
3651         I40E_WRITE_FLUSH(hw);
3652
3653         return 0;
3654 }
3655
3656 static int
3657 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3658                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3659 {
3660         PMD_INIT_FUNC_TRACE();
3661
3662         return -ENOSYS;
3663 }
3664
3665 /* Add a MAC address, and update filters */
3666 static int
3667 i40e_macaddr_add(struct rte_eth_dev *dev,
3668                  struct ether_addr *mac_addr,
3669                  __rte_unused uint32_t index,
3670                  uint32_t pool)
3671 {
3672         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3673         struct i40e_mac_filter_info mac_filter;
3674         struct i40e_vsi *vsi;
3675         int ret;
3676
3677         /* If VMDQ not enabled or configured, return */
3678         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
3679                           !pf->nb_cfg_vmdq_vsi)) {
3680                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
3681                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
3682                         pool);
3683                 return -ENOTSUP;
3684         }
3685
3686         if (pool > pf->nb_cfg_vmdq_vsi) {
3687                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3688                                 pool, pf->nb_cfg_vmdq_vsi);
3689                 return -EINVAL;
3690         }
3691
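        /* With VLAN filtering on, the filter must match MAC+VLAN; otherwise
         * a MAC-only perfect match is enough.
         */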
3692         rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3693         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3694                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3695         else
3696                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3697
3698         if (pool == 0)
3699                 vsi = pf->main_vsi;
3700         else
3701                 vsi = pf->vmdq[pool - 1].vsi;
3702
3703         ret = i40e_vsi_add_mac(vsi, &mac_filter);
3704         if (ret != I40E_SUCCESS) {
3705                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3706                 return -ENODEV;
3707         }
3708         return 0;
3709 }
3710
3711 /* Remove a MAC address, and update filters */
3712 static void
3713 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3714 {
3715         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3716         struct i40e_vsi *vsi;
3717         struct rte_eth_dev_data *data = dev->data;
3718         struct ether_addr *macaddr;
3719         int ret;
3720         uint32_t i;
3721         uint64_t pool_sel;
3722
3723         macaddr = &(data->mac_addrs[index]);
3724
3725         pool_sel = dev->data->mac_pool_sel[index];
3726
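        /* mac_pool_sel is a bitmap of the pools this address was added to:
         * bit 0 is the main VSI and bit i (i > 0) is VMDQ pool i.
         */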
3727         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3728                 if (pool_sel & (1ULL << i)) {
3729                         if (i == 0)
3730                                 vsi = pf->main_vsi;
3731                         else {
3732                                 /* No VMDQ pool enabled or configured */
3733                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3734                                         (i > pf->nb_cfg_vmdq_vsi)) {
3735                                         PMD_DRV_LOG(ERR,
3736                                                 "No VMDQ pool enabled/configured");
3737                                         return;
3738                                 }
3739                                 vsi = pf->vmdq[i - 1].vsi;
3740                         }
3741                         ret = i40e_vsi_delete_mac(vsi, macaddr);
3742
3743                         if (ret) {
3744                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3745                                 return;
3746                         }
3747                 }
3748         }
3749 }
3750
3751 /* Set perfect match or hash match of MAC and VLAN for a VF */
3752 static int
3753 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3754                  struct rte_eth_mac_filter *filter,
3755                  bool add)
3756 {
3757         struct i40e_hw *hw;
3758         struct i40e_mac_filter_info mac_filter;
3759         struct ether_addr old_mac;
3760         struct ether_addr *new_mac;
3761         struct i40e_pf_vf *vf = NULL;
3762         uint16_t vf_id;
3763         int ret;
3764
3765         if (pf == NULL) {
3766                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3767                 return -EINVAL;
3768         }
3769         hw = I40E_PF_TO_HW(pf);
3770
3771         if (filter == NULL) {
3772                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3773                 return -EINVAL;
3774         }
3775
3776         new_mac = &filter->mac_addr;
3777
3778         if (is_zero_ether_addr(new_mac)) {
3779                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3780                 return -EINVAL;
3781         }
3782
3783         vf_id = filter->dst_id;
3784
3785         if (vf_id >= pf->vf_num || !pf->vfs) {
3786                 PMD_DRV_LOG(ERR, "Invalid argument.");
3787                 return -EINVAL;
3788         }
3789         vf = &pf->vfs[vf_id];
3790
3791         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3792                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3793                 return -EINVAL;
3794         }
3795
3796         if (add) {
3797                 rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3798                 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3799                                 ETHER_ADDR_LEN);
3800                 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3801                                  ETHER_ADDR_LEN);
3802
3803                 mac_filter.filter_type = filter->filter_type;
3804                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3805                 if (ret != I40E_SUCCESS) {
3806                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3807                         return -1;
3808                 }
3809                 ether_addr_copy(new_mac, &pf->dev_addr);
3810         } else {
3811                 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3812                                 ETHER_ADDR_LEN);
3813                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3814                 if (ret != I40E_SUCCESS) {
3815                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3816                         return -1;
3817                 }
3818
3819                 /* Clear device address as it has been removed */
3820                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3821                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3822         }
3823
3824         return 0;
3825 }
3826
3827 /* MAC filter handle */
3828 static int
3829 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3830                 void *arg)
3831 {
3832         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3833         struct rte_eth_mac_filter *filter;
3834         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3835         int ret = I40E_NOT_SUPPORTED;
3836
3837         filter = (struct rte_eth_mac_filter *)(arg);
3838
3839         switch (filter_op) {
3840         case RTE_ETH_FILTER_NOP:
3841                 ret = I40E_SUCCESS;
3842                 break;
3843         case RTE_ETH_FILTER_ADD:
3844                 i40e_pf_disable_irq0(hw);
3845                 if (filter->is_vf)
3846                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
3847                 i40e_pf_enable_irq0(hw);
3848                 break;
3849         case RTE_ETH_FILTER_DELETE:
3850                 i40e_pf_disable_irq0(hw);
3851                 if (filter->is_vf)
3852                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
3853                 i40e_pf_enable_irq0(hw);
3854                 break;
3855         default:
3856                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3857                 ret = I40E_ERR_PARAM;
3858                 break;
3859         }
3860
3861         return ret;
3862 }
3863
3864 static int
3865 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3866 {
3867         struct i40e_pf *pf;
3868         struct i40e_hw *hw;
3869         uint32_t reg;
3870         int ret;
3871
3872         if (!vsi || !lut)
3873                 return -EINVAL;

         pf = I40E_VSI_TO_PF(vsi);
         hw = I40E_VSI_TO_HW(vsi);

3875         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3876                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
3877                                           lut, lut_size);
3878                 if (ret) {
3879                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3880                         return ret;
3881                 }
3882         } else {
3883                 uint32_t *lut_dw = (uint32_t *)lut;
3884                 uint16_t i, lut_size_dw = lut_size / 4;
3885
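                /* Each LUT register is 32 bits wide and packs four 8-bit
                 * entries, so the table is accessed one dword at a time.
                 */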
3886                 if (vsi->type == I40E_VSI_SRIOV) {
3887                         for (i = 0; i < lut_size_dw; i++) {
3888                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
3889                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
3890                         }
3891                 } else {
3892                         for (i = 0; i < lut_size_dw; i++)
3893                                 lut_dw[i] = I40E_READ_REG(hw,
3894                                                           I40E_PFQF_HLUT(i));
3895                 }
3896         }
3897
3898         return 0;
3899 }
3900
3901 int
3902 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3903 {
3904         struct i40e_pf *pf;
3905         struct i40e_hw *hw;
3906         int ret;
3907
3908         if (!vsi || !lut)
3909                 return -EINVAL;
3910
3911         pf = I40E_VSI_TO_PF(vsi);
3912         hw = I40E_VSI_TO_HW(vsi);
3913
3914         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3915                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
3916                                           lut, lut_size);
3917                 if (ret) {
3918                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3919                         return ret;
3920                 }
3921         } else {
3922                 uint32_t *lut_dw = (uint32_t *)lut;
3923                 uint16_t i, lut_size_dw = lut_size / 4;
3924
3925                 if (vsi->type == I40E_VSI_SRIOV) {
3926                         for (i = 0; i < lut_size_dw; i++)
3927                                 I40E_WRITE_REG(
3928                                         hw,
3929                                         I40E_VFQF_HLUT1(i, vsi->user_param),
3930                                         lut_dw[i]);
3931                 } else {
3932                         for (i = 0; i < lut_size_dw; i++)
3933                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
3934                                                lut_dw[i]);
3935                 }
3936                 I40E_WRITE_FLUSH(hw);
3937         }
3938
3939         return 0;
3940 }
3941
3942 static int
3943 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
3944                          struct rte_eth_rss_reta_entry64 *reta_conf,
3945                          uint16_t reta_size)
3946 {
3947         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3948         uint16_t i, lut_size = pf->hash_lut_size;
3949         uint16_t idx, shift;
3950         uint8_t *lut;
3951         int ret;
3952
3953         if (reta_size != lut_size ||
3954                 reta_size > ETH_RSS_RETA_SIZE_512) {
3955                 PMD_DRV_LOG(ERR,
3956                         "The size of the configured hash lookup table (%d) doesn't match the number the hardware can support (%d)",
3957                         reta_size, lut_size);
3958                 return -EINVAL;
3959         }
3960
3961         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3962         if (!lut) {
3963                 PMD_DRV_LOG(ERR, "No memory can be allocated");
3964                 return -ENOMEM;
3965         }
3966         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3967         if (ret)
3968                 goto out;
3969         for (i = 0; i < reta_size; i++) {
3970                 idx = i / RTE_RETA_GROUP_SIZE;
3971                 shift = i % RTE_RETA_GROUP_SIZE;
3972                 if (reta_conf[idx].mask & (1ULL << shift))
3973                         lut[i] = reta_conf[idx].reta[shift];
3974         }
3975         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
3976
3977 out:
3978         rte_free(lut);
3979
3980         return ret;
3981 }
3982
3983 static int
3984 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
3985                         struct rte_eth_rss_reta_entry64 *reta_conf,
3986                         uint16_t reta_size)
3987 {
3988         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3989         uint16_t i, lut_size = pf->hash_lut_size;
3990         uint16_t idx, shift;
3991         uint8_t *lut;
3992         int ret;
3993
3994         if (reta_size != lut_size ||
3995                 reta_size > ETH_RSS_RETA_SIZE_512) {
3996                 PMD_DRV_LOG(ERR,
3997                         "The size of the configured hash lookup table (%d) doesn't match the number the hardware can support (%d)",
3998                         reta_size, lut_size);
3999                 return -EINVAL;
4000         }
4001
4002         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4003         if (!lut) {
4004                 PMD_DRV_LOG(ERR, "No memory can be allocated");
4005                 return -ENOMEM;
4006         }
4007
4008         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4009         if (ret)
4010                 goto out;
4011         for (i = 0; i < reta_size; i++) {
4012                 idx = i / RTE_RETA_GROUP_SIZE;
4013                 shift = i % RTE_RETA_GROUP_SIZE;
4014                 if (reta_conf[idx].mask & (1ULL << shift))
4015                         reta_conf[idx].reta[shift] = lut[i];
4016         }
4017
4018 out:
4019         rte_free(lut);
4020
4021         return ret;
4022 }
4023
4024 /**
4025  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4026  * @hw:   pointer to the HW structure
4027  * @mem:  pointer to mem struct to fill out
4028  * @size: size of memory requested
4029  * @alignment: what to align the allocation to
4030  **/
4031 enum i40e_status_code
4032 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4033                         struct i40e_dma_mem *mem,
4034                         u64 size,
4035                         u32 alignment)
4036 {
4037         const struct rte_memzone *mz = NULL;
4038         char z_name[RTE_MEMZONE_NAMESIZE];
4039
4040         if (!mem)
4041                 return I40E_ERR_PARAM;
4042
4043         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
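        /* rte_rand() gives the zone a unique name so repeated allocations
         * don't collide; bounding to 2M keeps the zone from crossing a
         * hugepage boundary, so it stays physically contiguous for HW DMA.
         */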
4044         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
4045                                          alignment, RTE_PGSIZE_2M);
4046         if (!mz)
4047                 return I40E_ERR_NO_MEMORY;
4048
4049         mem->size = size;
4050         mem->va = mz->addr;
4051         mem->pa = mz->iova;
4052         mem->zone = (const void *)mz;
4053         PMD_DRV_LOG(DEBUG,
4054                 "memzone %s allocated with physical address: %"PRIu64,
4055                 mz->name, mem->pa);
4056
4057         return I40E_SUCCESS;
4058 }
4059
4060 /**
4061  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4062  * @hw:   pointer to the HW structure
4063  * @mem:  ptr to mem struct to free
4064  **/
4065 enum i40e_status_code
4066 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4067                     struct i40e_dma_mem *mem)
4068 {
4069         if (!mem)
4070                 return I40E_ERR_PARAM;
4071
4072         PMD_DRV_LOG(DEBUG,
4073                 "memzone %s to be freed with physical address: %"PRIu64,
4074                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4075         rte_memzone_free((const struct rte_memzone *)mem->zone);
4076         mem->zone = NULL;
4077         mem->va = NULL;
4078         mem->pa = (u64)0;
4079
4080         return I40E_SUCCESS;
4081 }
4082
4083 /**
4084  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4085  * @hw:   pointer to the HW structure
4086  * @mem:  pointer to mem struct to fill out
4087  * @size: size of memory requested
4088  **/
4089 enum i40e_status_code
4090 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4091                          struct i40e_virt_mem *mem,
4092                          u32 size)
4093 {
4094         if (!mem)
4095                 return I40E_ERR_PARAM;
4096
4097         mem->size = size;
4098         mem->va = rte_zmalloc("i40e", size, 0);
4099
4100         if (mem->va)
4101                 return I40E_SUCCESS;
4102         else
4103                 return I40E_ERR_NO_MEMORY;
4104 }
4105
4106 /**
4107  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4108  * @hw:   pointer to the HW structure
4109  * @mem:  pointer to mem struct to free
4110  **/
4111 enum i40e_status_code
4112 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4113                      struct i40e_virt_mem *mem)
4114 {
4115         if (!mem)
4116                 return I40E_ERR_PARAM;
4117
4118         rte_free(mem->va);
4119         mem->va = NULL;
4120
4121         return I40E_SUCCESS;
4122 }
4123
4124 void
4125 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4126 {
4127         rte_spinlock_init(&sp->spinlock);
4128 }
4129
4130 void
4131 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4132 {
4133         rte_spinlock_lock(&sp->spinlock);
4134 }
4135
4136 void
4137 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4138 {
4139         rte_spinlock_unlock(&sp->spinlock);
4140 }
4141
4142 void
4143 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
4144 {
4145         return;
4146 }
4147
4148 /**
4149  * Get the hardware capabilities, which will be parsed
4150  * and saved into struct i40e_hw.
4151  */
4152 static int
4153 i40e_get_cap(struct i40e_hw *hw)
4154 {
4155         struct i40e_aqc_list_capabilities_element_resp *buf;
4156         uint16_t len, size = 0;
4157         int ret;
4158
4159         /* Allocate a buffer large enough to hold the response data temporarily */
4160         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4161                                                 I40E_MAX_CAP_ELE_NUM;
4162         buf = rte_zmalloc("i40e", len, 0);
4163         if (!buf) {
4164                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4165                 return I40E_ERR_NO_MEMORY;
4166         }
4167
4168         /* Get and parse the capabilities, then save them to hw */
4169         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4170                         i40e_aqc_opc_list_func_capabilities, NULL);
4171         if (ret != I40E_SUCCESS)
4172                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4173
4174         /* Free the temporary buffer after being used */
4175         rte_free(buf);
4176
4177         return ret;
4178 }
4179
4180 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4181 #define QUEUE_NUM_PER_VF_ARG                    "queue-num-per-vf"
4182
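/* The "queue-num-per-vf" devarg selects how many queue pairs each VF VSI
 * gets, e.g. on the EAL command line (hypothetical PCI address):
 *     -w 0000:01:00.0,queue-num-per-vf=8
 * Valid values are powers of two up to I40E_MAX_QP_NUM_PER_VF.
 */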
4183 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4184                 const char *value,
4185                 void *opaque)
4186 {
4187         struct i40e_pf *pf;
4188         unsigned long num;
4189         char *end;
4190
4191         pf = (struct i40e_pf *)opaque;
4192         RTE_SET_USED(key);
4193
4194         errno = 0;
4195         num = strtoul(value, &end, 0);
4196         if (errno != 0 || end == value || *end != 0) {
4197                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s; "
4198                             "keeping the value = %hu", value, pf->vf_nb_qp_max);
4199                 return -(EINVAL);
4200         }
4201
4202         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4203                 pf->vf_nb_qp_max = (uint16_t)num;
4204         else
4205                 /* Return 0 so that a later valid occurrence of the same argument still works */
4206                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu; it must be "
4207                             "a power of 2 no greater than 16! Keeping the "
4208                             "value = %hu", num, pf->vf_nb_qp_max);
4209
4210         return 0;
4211 }
4212
4213 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4214 {
4215         static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
4216         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4217         struct rte_kvargs *kvlist;
4218
4219         /* set default queue number per VF as 4 */
4220         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4221
4222         if (dev->device->devargs == NULL)
4223                 return 0;
4224
4225         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4226         if (kvlist == NULL)
4227                 return -(EINVAL);
4228
4229         if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
4230                 PMD_DRV_LOG(WARNING, "Argument \"%s\" given more than once; only "
4231                             "the first invalid or the last valid one is used!",
4232                             QUEUE_NUM_PER_VF_ARG);
4233
4234         rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
4235                            i40e_pf_parse_vf_queue_number_handler, pf);
4236
4237         rte_kvargs_free(kvlist);
4238
4239         return 0;
4240 }
4241
4242 static int
4243 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4244 {
4245         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4246         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4247         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4248         uint16_t qp_count = 0, vsi_count = 0;
4249
4250         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4251                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4252                 return -EINVAL;
4253         }
4254
4255         i40e_pf_config_vf_rxq_number(dev);
4256
4257         /* Add the parameter init for LFC */
4258         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4259         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4260         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4261
4262         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4263         pf->max_num_vsi = hw->func_caps.num_vsis;
4264         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4265         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4266
4267         /* FDir queue/VSI allocation */
4268         pf->fdir_qp_offset = 0;
4269         if (hw->func_caps.fd) {
4270                 pf->flags |= I40E_FLAG_FDIR;
4271                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4272         } else {
4273                 pf->fdir_nb_qps = 0;
4274         }
4275         qp_count += pf->fdir_nb_qps;
4276         vsi_count += 1;
4277
4278         /* LAN queue/VSI allocation */
4279         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4280         if (!hw->func_caps.rss) {
4281                 pf->lan_nb_qps = 1;
4282         } else {
4283                 pf->flags |= I40E_FLAG_RSS;
4284                 if (hw->mac.type == I40E_MAC_X722)
4285                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4286                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4287         }
4288         qp_count += pf->lan_nb_qps;
4289         vsi_count += 1;
4290
4291         /* VF queue/VSI allocation */
4292         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4293         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4294                 pf->flags |= I40E_FLAG_SRIOV;
4295                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4296                 pf->vf_num = pci_dev->max_vfs;
4297                 PMD_DRV_LOG(DEBUG,
4298                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4299                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4300         } else {
4301                 pf->vf_nb_qps = 0;
4302                 pf->vf_num = 0;
4303         }
4304         qp_count += pf->vf_nb_qps * pf->vf_num;
4305         vsi_count += pf->vf_num;
4306
4307         /* VMDq queue/VSI allocation */
4308         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4309         pf->vmdq_nb_qps = 0;
4310         pf->max_nb_vmdq_vsi = 0;
4311         if (hw->func_caps.vmdq) {
4312                 if (qp_count < hw->func_caps.num_tx_qp &&
4313                         vsi_count < hw->func_caps.num_vsis) {
4314                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4315                                 qp_count) / pf->vmdq_nb_qp_max;
4316
4317                         /* Limit the maximum number of VMDq vsi to the maximum
4318                          * ethdev can support
4319                          */
4320                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4321                                 hw->func_caps.num_vsis - vsi_count);
4322                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4323                                 ETH_64_POOLS);
4324                         if (pf->max_nb_vmdq_vsi) {
4325                                 pf->flags |= I40E_FLAG_VMDQ;
4326                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4327                                 PMD_DRV_LOG(DEBUG,
4328                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4329                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4330                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4331                         } else {
4332                                 PMD_DRV_LOG(INFO,
4333                                         "No enough queues left for VMDq");
4334                         }
4335                 } else {
4336                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4337                 }
4338         }
4339         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4340         vsi_count += pf->max_nb_vmdq_vsi;
4341
4342         if (hw->func_caps.dcb)
4343                 pf->flags |= I40E_FLAG_DCB;
4344
4345         if (qp_count > hw->func_caps.num_tx_qp) {
4346                 PMD_DRV_LOG(ERR,
4347                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4348                         qp_count, hw->func_caps.num_tx_qp);
4349                 return -EINVAL;
4350         }
4351         if (vsi_count > hw->func_caps.num_vsis) {
4352                 PMD_DRV_LOG(ERR,
4353                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4354                         vsi_count, hw->func_caps.num_vsis);
4355                 return -EINVAL;
4356         }
4357
4358         return 0;
4359 }
4360
4361 static int
4362 i40e_pf_get_switch_config(struct i40e_pf *pf)
4363 {
4364         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4365         struct i40e_aqc_get_switch_config_resp *switch_config;
4366         struct i40e_aqc_switch_config_element_resp *element;
4367         uint16_t start_seid = 0, num_reported;
4368         int ret;
4369
4370         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4371                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4372         if (!switch_config) {
4373                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4374                 return -ENOMEM;
4375         }
4376
4377         /* Get the switch configurations */
4378         ret = i40e_aq_get_switch_config(hw, switch_config,
4379                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4380         if (ret != I40E_SUCCESS) {
4381                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4382                 goto fail;
4383         }
4384         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4385         if (num_reported != 1) { /* The number should be 1 */
4386                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4387                 goto fail;
4388         }
4389
4390         /* Parse the switch configuration elements */
4391         element = &(switch_config->element[0]);
4392         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4393                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4394                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4395         } else
4396                 PMD_DRV_LOG(INFO, "Unknown element type");
4397
4398 fail:
4399         rte_free(switch_config);
4400
4401         return ret;
4402 }
4403
4404 static int
4405 i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
4406                         uint32_t num)
4407 {
4408         struct pool_entry *entry;
4409
4410         if (pool == NULL || num == 0)
4411                 return -EINVAL;
4412
4413         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4414         if (entry == NULL) {
4415                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4416                 return -ENOMEM;
4417         }
4418
4419         /* Initialize the queue heap */
4420         pool->num_free = num;
4421         pool->num_alloc = 0;
4422         pool->base = base;
4423         LIST_INIT(&pool->alloc_list);
4424         LIST_INIT(&pool->free_list);
4425
4426         /* Initialize the element */
4427         entry->base = 0;
4428         entry->len = num;
4429
4430         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4431         return 0;
4432 }
4433
4434 static void
4435 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4436 {
4437         struct pool_entry *entry, *next_entry;
4438
4439         if (pool == NULL)
4440                 return;
4441
4442         for (entry = LIST_FIRST(&pool->alloc_list);
4443                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4444                         entry = next_entry) {
4445                 LIST_REMOVE(entry, next);
4446                 rte_free(entry);
4447         }
4448
4449         for (entry = LIST_FIRST(&pool->free_list);
4450                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4451                         entry = next_entry) {
4452                 LIST_REMOVE(entry, next);
4453                 rte_free(entry);
4454         }
4455
4456         pool->num_free = 0;
4457         pool->num_alloc = 0;
4458         pool->base = 0;
4459         LIST_INIT(&pool->alloc_list);
4460         LIST_INIT(&pool->free_list);
4461 }
4462
4463 static int
4464 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4465                        uint32_t base)
4466 {
4467         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4468         uint32_t pool_offset;
4469         int insert;
4470
4471         if (pool == NULL) {
4472                 PMD_DRV_LOG(ERR, "Invalid parameter");
4473                 return -EINVAL;
4474         }
4475
4476         pool_offset = base - pool->base;
4477         /* Lookup in alloc list */
4478         LIST_FOREACH(entry, &pool->alloc_list, next) {
4479                 if (entry->base == pool_offset) {
4480                         valid_entry = entry;
4481                         LIST_REMOVE(entry, next);
4482                         break;
4483                 }
4484         }
4485
4486         /* Not found, return */
4487         if (valid_entry == NULL) {
4488                 PMD_DRV_LOG(ERR, "Failed to find entry");
4489                 return -EINVAL;
4490         }
4491
4492         /**
4493          * Found it; move it to the free list and try to merge.
4494          * The free list is kept sorted by base to make merging easier.
4495          * Find the adjacent prev and next entries.
4496          */
4497         prev = next = NULL;
4498         LIST_FOREACH(entry, &pool->free_list, next) {
4499                 if (entry->base > valid_entry->base) {
4500                         next = entry;
4501                         break;
4502                 }
4503                 prev = entry;
4504         }
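        /* Worked example (offsets relative to pool->base): with a free list
         * of {[0,8), [16,8)} and [8,8) being freed, prev = [0,8) and
         * next = [16,8); both merges below apply, leaving a single [0,24).
         */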
4505
4506         insert = 0;
4507         /* Try to merge with the next entry */
4508         if (next != NULL) {
4509                 /* Merge if the freed range ends where the next entry begins */
4510                 if (valid_entry->base + valid_entry->len == next->base) {
4511                         next->base = valid_entry->base;
4512                         next->len += valid_entry->len;
4513                         rte_free(valid_entry);
4514                         valid_entry = next;
4515                         insert = 1;
4516                 }
4517         }
4518
4519         if (prev != NULL) {
4520                 /* Merge with previous one */
4521                 if (prev->base + prev->len == valid_entry->base) {
4522                         prev->len += valid_entry->len;
4523                         /* If already merged with the next one, remove that node */
4524                         if (insert == 1) {
4525                                 LIST_REMOVE(valid_entry, next);
4526                                 rte_free(valid_entry);
4527                         } else {
4528                                 rte_free(valid_entry);
4529                                 insert = 1;
4530                         }
4531                 }
4532         }
4533
4534         /* No entry merged; insert the freed range */
4535         if (insert == 0) {
4536                 if (prev != NULL)
4537                         LIST_INSERT_AFTER(prev, valid_entry, next);
4538                 else if (next != NULL)
4539                         LIST_INSERT_BEFORE(next, valid_entry, next);
4540                 else /* It's empty list, insert to head */
4541                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4542         }
4543
4544         pool->num_free += valid_entry->len;
4545         pool->num_alloc -= valid_entry->len;
4546
4547         return 0;
4548 }
4549
4550 static int
4551 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4552                        uint16_t num)
4553 {
4554         struct pool_entry *entry, *valid_entry;
4555
4556         if (pool == NULL || num == 0) {
4557                 PMD_DRV_LOG(ERR, "Invalid parameter");
4558                 return -EINVAL;
4559         }
4560
4561         if (pool->num_free < num) {
4562                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
4563                             num, pool->num_free);
4564                 return -ENOMEM;
4565         }
4566
4567         valid_entry = NULL;
4568         /* Look up the free list and find the best-fit entry */
4569         LIST_FOREACH(entry, &pool->free_list, next) {
4570                 if (entry->len >= num) {
4571                         /* Exact fit, take it */
4572                         if (entry->len == num) {
4573                                 valid_entry = entry;
4574                                 break;
4575                         }
4576                         if (valid_entry == NULL || valid_entry->len > entry->len)
4577                                 valid_entry = entry;
4578                 }
4579         }
4580
4581         /* No entry satisfies the request; return */
4582         if (valid_entry == NULL) {
4583                 PMD_DRV_LOG(ERR, "No valid entry found");
4584                 return -ENOMEM;
4585         }
4586         /**
4587          * The entry has exactly the requested number of queues;
4588          * remove it from the free list.
4589          */
4590         if (valid_entry->len == num) {
4591                 LIST_REMOVE(valid_entry, next);
4592         } else {
4593                 /**
4594                  * The entry has more queues than requested; create a new
4595                  * entry for the alloc list and shrink the base/length of
4596                  * the free-list entry accordingly.
4597                  */
4598                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4599                 if (entry == NULL) {
4600                         PMD_DRV_LOG(ERR,
4601                                 "Failed to allocate memory for resource pool");
4602                         return -ENOMEM;
4603                 }
4604                 entry->base = valid_entry->base;
4605                 entry->len = num;
4606                 valid_entry->base += num;
4607                 valid_entry->len -= num;
4608                 valid_entry = entry;
4609         }
4610
4611         /* Insert it into alloc list, not sorted */
4612         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4613
4614         pool->num_free -= valid_entry->len;
4615         pool->num_alloc += valid_entry->len;
4616
4617         return valid_entry->base + pool->base;
4618 }
4619
4620 /**
4621  * bitmap_is_subset - Check whether src2 is subset of src1
4622  **/
4623 static inline int
4624 bitmap_is_subset(uint8_t src1, uint8_t src2)
4625 {
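        /* (src1 ^ src2) & src2 keeps exactly the bits set in src2 but not
         * in src1; src2 is a subset of src1 iff that value is zero.
         */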
4626         return !((src1 ^ src2) & src2);
4627 }
4628
4629 static enum i40e_status_code
4630 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4631 {
4632         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4633
4634         /* If DCB is not supported, only default TC is supported */
4635         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4636                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
4637                 return I40E_NOT_SUPPORTED;
4638         }
4639
4640         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4641                 PMD_DRV_LOG(ERR,
4642                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
4643                         hw->func_caps.enabled_tcmap, enabled_tcmap);
4644                 return I40E_NOT_SUPPORTED;
4645         }
4646         return I40E_SUCCESS;
4647 }
4648
4649 int
4650 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4651                                 struct i40e_vsi_vlan_pvid_info *info)
4652 {
4653         struct i40e_hw *hw;
4654         struct i40e_vsi_context ctxt;
4655         uint8_t vlan_flags = 0;
4656         int ret;
4657
4658         if (vsi == NULL || info == NULL) {
4659                 PMD_DRV_LOG(ERR, "invalid parameters");
4660                 return I40E_ERR_PARAM;
4661         }
4662
4663         if (info->on) {
4664                 vsi->info.pvid = info->config.pvid;
4665                 /**
4666                  * If insert pvid is enabled, only tagged pkts are
4667                  * allowed to be sent out.
4668                  */
4669                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
4670                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4671         } else {
4672                 vsi->info.pvid = 0;
4673                 if (info->config.reject.tagged == 0)
4674                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4675
4676                 if (info->config.reject.untagged == 0)
4677                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
4678         }
4679         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
4680                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
4681         vsi->info.port_vlan_flags |= vlan_flags;
4682         vsi->info.valid_sections =
4683                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4684         memset(&ctxt, 0, sizeof(ctxt));
4685         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4686         ctxt.seid = vsi->seid;
4687
4688         hw = I40E_VSI_TO_HW(vsi);
4689         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4690         if (ret != I40E_SUCCESS)
4691                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
4692
4693         return ret;
4694 }
4695
4696 static int
4697 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4698 {
4699         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4700         int i, ret;
4701         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
4702
4703         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4704         if (ret != I40E_SUCCESS)
4705                 return ret;
4706
4707         if (!vsi->seid) {
4708                 PMD_DRV_LOG(ERR, "seid not valid");
4709                 return -EINVAL;
4710         }
4711
4712         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
4713         tc_bw_data.tc_valid_bits = enabled_tcmap;
4714         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4715                 tc_bw_data.tc_bw_credits[i] =
4716                         (enabled_tcmap & (1 << i)) ? 1 : 0;
4717
4718         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
4719         if (ret != I40E_SUCCESS) {
4720                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
4721                 return ret;
4722         }
4723
4724         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
4725                                         sizeof(vsi->info.qs_handle));
4726         return I40E_SUCCESS;
4727 }
4728
4729 static enum i40e_status_code
4730 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
4731                                  struct i40e_aqc_vsi_properties_data *info,
4732                                  uint8_t enabled_tcmap)
4733 {
4734         enum i40e_status_code ret;
4735         int i, total_tc = 0;
4736         uint16_t qpnum_per_tc, bsf, qp_idx;
4737
4738         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4739         if (ret != I40E_SUCCESS)
4740                 return ret;
4741
4742         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4743                 if (enabled_tcmap & (1 << i))
4744                         total_tc++;
4745         if (total_tc == 0)
4746                 total_tc = 1;
4747         vsi->enabled_tc = enabled_tcmap;
4748
4749         /* Number of queues per enabled TC */
4750         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
4751         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
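        /* The HW encodes the per-TC queue count as a power-of-two exponent,
         * hence the bit-scan of qpnum_per_tc (already rounded down to a
         * power of two by i40e_align_floor).
         */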
4752         bsf = rte_bsf32(qpnum_per_tc);
4753
4754         /* Adjust nb_qps to the number of queues that can actually be used */
4755         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4756                 vsi->nb_qps = qpnum_per_tc * total_tc;
4757
4758         /**
4759          * Configure TC and queue mapping parameters. Each enabled TC
4760          * gets qpnum_per_tc queues; disabled TCs are served by the
4761          * default queue.
4762          */
4763         qp_idx = 0;
4764         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4765                 if (vsi->enabled_tc & (1 << i)) {
4766                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4767                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4768                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4769                         qp_idx += qpnum_per_tc;
4770                 } else
4771                         info->tc_mapping[i] = 0;
4772         }
4773
4774         /* Associate queue number with VSI */
4775         if (vsi->type == I40E_VSI_SRIOV) {
4776                 info->mapping_flags |=
4777                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4778                 for (i = 0; i < vsi->nb_qps; i++)
4779                         info->queue_mapping[i] =
4780                                 rte_cpu_to_le_16(vsi->base_queue + i);
4781         } else {
4782                 info->mapping_flags |=
4783                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4784                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4785         }
4786         info->valid_sections |=
4787                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4788
4789         return I40E_SUCCESS;
4790 }
4791
4792 static int
4793 i40e_veb_release(struct i40e_veb *veb)
4794 {
4795         struct i40e_vsi *vsi;
4796         struct i40e_hw *hw;
4797
4798         if (veb == NULL)
4799                 return -EINVAL;
4800
4801         if (!TAILQ_EMPTY(&veb->head)) {
4802                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4803                 return -EACCES;
4804         }
4805         /* associate_vsi field is NULL for floating VEB */
4806         if (veb->associate_vsi != NULL) {
4807                 vsi = veb->associate_vsi;
4808                 hw = I40E_VSI_TO_HW(vsi);
4809
4810                 vsi->uplink_seid = veb->uplink_seid;
4811                 vsi->veb = NULL;
4812         } else {
4813                 veb->associate_pf->main_vsi->floating_veb = NULL;
4814                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4815         }
4816
4817         i40e_aq_delete_element(hw, veb->seid, NULL);
4818         rte_free(veb);
4819         return I40E_SUCCESS;
4820 }
4821
4822 /* Setup a veb */
4823 static struct i40e_veb *
4824 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4825 {
4826         struct i40e_veb *veb;
4827         int ret;
4828         struct i40e_hw *hw;
4829
4830         if (pf == NULL) {
4831                 PMD_DRV_LOG(ERR,
4832                             "VEB setup failed, associated PF shouldn't be NULL");
4833                 return NULL;
4834         }
4835         hw = I40E_PF_TO_HW(pf);
4836
4837         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4838         if (!veb) {
4839                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4840                 goto fail;
4841         }
4842
4843         veb->associate_vsi = vsi;
4844         veb->associate_pf = pf;
4845         TAILQ_INIT(&veb->head);
4846         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4847
4848         /* create floating veb if vsi is NULL */
4849         if (vsi != NULL) {
4850                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4851                                       I40E_DEFAULT_TCMAP, false,
4852                                       &veb->seid, false, NULL);
4853         } else {
4854                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4855                                       true, &veb->seid, false, NULL);
4856         }
4857
4858         if (ret != I40E_SUCCESS) {
4859                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4860                             hw->aq.asq_last_status);
4861                 goto fail;
4862         }
4863         veb->enabled_tc = I40E_DEFAULT_TCMAP;
4864
4865         /* get statistics index */
4866         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4867                                 &veb->stats_idx, NULL, NULL, NULL);
4868         if (ret != I40E_SUCCESS) {
4869                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
4870                             hw->aq.asq_last_status);
4871                 goto fail;
4872         }
4873         /* Get VEB bandwidth, to be implemented */
4874         /* Now associated vsi binding to the VEB, set uplink to this VEB */
4875         if (vsi)
4876                 vsi->uplink_seid = veb->seid;
4877
4878         return veb;
4879 fail:
4880         rte_free(veb);
4881         return NULL;
4882 }
4883
4884 int
4885 i40e_vsi_release(struct i40e_vsi *vsi)
4886 {
4887         struct i40e_pf *pf;
4888         struct i40e_hw *hw;
4889         struct i40e_vsi_list *vsi_list;
4890         void *temp;
4891         int ret;
4892         struct i40e_mac_filter *f;
4893         uint16_t user_param;
4894
4895         if (!vsi)
4896                 return I40E_SUCCESS;
4897
4898         if (!vsi->adapter)
4899                 return -EFAULT;
4900
4901         user_param = vsi->user_param;
4902
4903         pf = I40E_VSI_TO_PF(vsi);
4904         hw = I40E_VSI_TO_HW(vsi);
4905
4906         /* VSI has children attached, release the children first */
4907         if (vsi->veb) {
4908                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
4909                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4910                                 return -1;
4911                 }
4912                 i40e_veb_release(vsi->veb);
4913         }
4914
4915         if (vsi->floating_veb) {
4916                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
4917                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4918                                 return -1;
4919                 }
4920         }
4921
4922         /* Remove all macvlan filters of the VSI */
4923         i40e_vsi_remove_all_macvlan_filter(vsi);
4924         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
4925                 rte_free(f);
4926
4927         if (vsi->type != I40E_VSI_MAIN &&
4928             ((vsi->type != I40E_VSI_SRIOV) ||
4929             !pf->floating_veb_list[user_param])) {
4930                 /* Remove vsi from parent's sibling list */
4931                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
4932                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its VEB is NULL");
4933                         return I40E_ERR_PARAM;
4934                 }
4935                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
4936                                 &vsi->sib_vsi_list, list);
4937
4938                 /* Remove all switch element of the VSI */
4939                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4940                 if (ret != I40E_SUCCESS)
4941                         PMD_DRV_LOG(ERR, "Failed to delete element");
4942         }
4943
4944         if ((vsi->type == I40E_VSI_SRIOV) &&
4945             pf->floating_veb_list[user_param]) {
4946                 /* Remove vsi from parent's sibling list */
4947                 if (vsi->parent_vsi == NULL ||
4948                     vsi->parent_vsi->floating_veb == NULL) {
4949                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its floating VEB is NULL");
4950                         return I40E_ERR_PARAM;
4951                 }
4952                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
4953                              &vsi->sib_vsi_list, list);
4954
4955                 /* Remove all switch element of the VSI */
4956                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4957                 if (ret != I40E_SUCCESS)
4958                         PMD_DRV_LOG(ERR, "Failed to delete element");
4959         }
4960
4961         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4962
4963         if (vsi->type != I40E_VSI_SRIOV)
4964                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
4965         rte_free(vsi);
4966
4967         return I40E_SUCCESS;
4968 }
4969
4970 static int
4971 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
4972 {
4973         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4974         struct i40e_aqc_remove_macvlan_element_data def_filter;
4975         struct i40e_mac_filter_info filter;
4976         int ret;
4977
4978         if (vsi->type != I40E_VSI_MAIN)
4979                 return I40E_ERR_CONFIG;
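        /* The firmware-installed default MAC filter matches any VLAN;
         * remove it and re-add the permanent MAC as a perfect MAC/VLAN
         * match so VLAN filtering behaves predictably on the main VSI.
         */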
4980         memset(&def_filter, 0, sizeof(def_filter));
4981         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
4982                                         ETH_ADDR_LEN);
4983         def_filter.vlan_tag = 0;
4984         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4985                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4986         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
4987         if (ret != I40E_SUCCESS) {
4988                 struct i40e_mac_filter *f;
4989                 struct ether_addr *mac;
4990
4991                 PMD_DRV_LOG(DEBUG,
4992                             "Cannot remove the default macvlan filter");
4993                 /* Add the permanent MAC into the MAC list instead */
4994                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4995                 if (f == NULL) {
4996                         PMD_DRV_LOG(ERR, "failed to allocate memory");
4997                         return I40E_ERR_NO_MEMORY;
4998                 }
4999                 mac = &f->mac_info.mac_addr;
5000                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5001                                 ETH_ADDR_LEN);
5002                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5003                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5004                 vsi->mac_num++;
5005
5006                 return ret;
5007         }
5008         rte_memcpy(&filter.mac_addr,
5009                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5010         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5011         return i40e_vsi_add_mac(vsi, &filter);
5012 }
5013
5014 /*
5015  * i40e_vsi_get_bw_config - Query VSI BW Information
5016  * @vsi: the VSI to be queried
5017  *
5018  * Returns 0 on success, negative value on failure
5019  */
5020 static enum i40e_status_code
5021 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5022 {
5023         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5024         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5025         struct i40e_hw *hw = &vsi->adapter->hw;
5026         i40e_status ret;
5027         int i;
5028         uint32_t bw_max;
5029
5030         memset(&bw_config, 0, sizeof(bw_config));
5031         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5032         if (ret != I40E_SUCCESS) {
5033                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5034                             hw->aq.asq_last_status);
5035                 return ret;
5036         }
5037
5038         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5039         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5040                                         &ets_sla_config, NULL);
5041         if (ret != I40E_SUCCESS) {
5042                 PMD_DRV_LOG(ERR,
5043                         "VSI failed to get TC bandwidth configuration %u",
5044                         hw->aq.asq_last_status);
5045                 return ret;
5046         }
5047
5048         /* store and print out BW info */
5049         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5050         vsi->bw_info.bw_max = bw_config.max_bw;
5051         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5052         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
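        /* tc_bw_max holds two LE 16-bit words; combined they form eight
         * 4-bit per-TC max-credit fields (the 4th bit of each is reserved).
         */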
5053         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5054                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5055                      I40E_16_BIT_WIDTH);
5056         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5057                 vsi->bw_info.bw_ets_share_credits[i] =
5058                                 ets_sla_config.share_credits[i];
5059                 vsi->bw_info.bw_ets_credits[i] =
5060                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5061                 /* 4 bits per TC, 4th bit is reserved */
5062                 vsi->bw_info.bw_ets_max[i] =
5063                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5064                                   RTE_LEN2MASK(3, uint8_t));
5065                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5066                             vsi->bw_info.bw_ets_share_credits[i]);
5067                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5068                             vsi->bw_info.bw_ets_credits[i]);
5069                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5070                             vsi->bw_info.bw_ets_max[i]);
5071         }
5072
5073         return I40E_SUCCESS;
5074 }
5075
5076 /* i40e_enable_pf_lb
5077  * @pf: pointer to the pf structure
5078  *
5079  * allow loopback on pf
5080  */
5081 static inline void
5082 i40e_enable_pf_lb(struct i40e_pf *pf)
5083 {
5084         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5085         struct i40e_vsi_context ctxt;
5086         int ret;
5087
5088         /* Use the FW API if FW >= v5.0 */
5089         if (hw->aq.fw_maj_ver < 5) {
5090                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5091                 return;
5092         }
5093
5094         memset(&ctxt, 0, sizeof(ctxt));
5095         ctxt.seid = pf->main_vsi_seid;
5096         ctxt.pf_num = hw->pf_id;
5097         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5098         if (ret) {
5099                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5100                             ret, hw->aq.asq_last_status);
5101                 return;
5102         }
5103         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5104         ctxt.info.valid_sections =
5105                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
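        /* Setting ALLOW_LB in switch_id enables local loopback on this VSI,
         * letting the switch forward traffic back to VSIs under the same PF
         * (e.g. VF-to-VF traffic through the VEB).
         */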
5106         ctxt.info.switch_id |=
5107                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5108
5109         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5110         if (ret)
5111                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5112                             hw->aq.asq_last_status);
5113 }
5114
5115 /* Setup a VSI */
5116 struct i40e_vsi *
5117 i40e_vsi_setup(struct i40e_pf *pf,
5118                enum i40e_vsi_type type,
5119                struct i40e_vsi *uplink_vsi,
5120                uint16_t user_param)
5121 {
5122         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5123         struct i40e_vsi *vsi;
5124         struct i40e_mac_filter_info filter;
5125         int ret;
5126         struct i40e_vsi_context ctxt;
5127         struct ether_addr broadcast =
5128                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5129
5130         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5131             uplink_vsi == NULL) {
5132                 PMD_DRV_LOG(ERR,
5133                         "VSI setup failed, uplink VSI shouldn't be NULL");
5134                 return NULL;
5135         }
5136
5137         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5138                 PMD_DRV_LOG(ERR,
5139                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5140                 return NULL;
5141         }
5142
5143         /* Two situations:
5144          * 1. type is not MAIN and the uplink VSI is not NULL:
5145          *    if the uplink VSI has no VEB yet, create one (veb field).
5146          * 2. type is SRIOV and the uplink is NULL:
5147          *    if there is no floating VEB yet, create one (floating_veb field).
5148          */
5149
5150         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5151             uplink_vsi->veb == NULL) {
5152                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5153
5154                 if (uplink_vsi->veb == NULL) {
5155                         PMD_DRV_LOG(ERR, "VEB setup failed");
5156                         return NULL;
5157                 }
5158                 /* Set ALLOWLOOPBACK on the PF when a VEB is created */
5159                 i40e_enable_pf_lb(pf);
5160         }
5161
5162         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5163             pf->main_vsi->floating_veb == NULL) {
5164                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5165
5166                 if (pf->main_vsi->floating_veb == NULL) {
5167                         PMD_DRV_LOG(ERR, "VEB setup failed");
5168                         return NULL;
5169                 }
5170         }
5171
5172         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5173         if (!vsi) {
5174                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5175                 return NULL;
5176         }
5177         TAILQ_INIT(&vsi->mac_list);
5178         vsi->type = type;
5179         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5180         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5181         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5182         vsi->user_param = user_param;
5183         vsi->vlan_anti_spoof_on = 0;
5184         vsi->vlan_filter_on = 0;
5185         /* Allocate queues */
5186         switch (vsi->type) {
5187         case I40E_VSI_MAIN:
5188                 vsi->nb_qps = pf->lan_nb_qps;
5189                 break;
5190         case I40E_VSI_SRIOV:
5191                 vsi->nb_qps = pf->vf_nb_qps;
5192                 break;
5193         case I40E_VSI_VMDQ2:
5194                 vsi->nb_qps = pf->vmdq_nb_qps;
5195                 break;
5196         case I40E_VSI_FDIR:
5197                 vsi->nb_qps = pf->fdir_nb_qps;
5198                 break;
5199         default:
5200                 goto fail_mem;
5201         }
5202         /*
5203          * The filter status descriptor is reported on RX queue 0,
5204          * while the TX queue for FDIR filter programming has no such
5205          * constraint and can be any queue.
5206          * To keep it simple, the FDIR VSI uses queue pair 0.
5207          * To guarantee that, queue pair 0 must be allocated before
5208          * this function is called.
5209          */
5210         if (type != I40E_VSI_FDIR) {
5211                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5212                 if (ret < 0) {
5213                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5214                                     vsi->seid, ret);
5215                         goto fail_mem;
5216                 }
5217                 vsi->base_queue = ret;
5218         } else
5219                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5220
5221         /* VFs have MSI-X interrupts in the VF range; don't allocate here */
5222         if (type == I40E_VSI_MAIN) {
5223                 if (pf->support_multi_driver) {
5224                         /* With multi-driver support, INT0 must be used instead
5225                          * of allocating from the MSI-X pool. The pool starts
5226                          * from INT1, so it is safe to just set msix_intr to 0
5227                          * and nb_msix to 1 without calling i40e_res_pool_alloc.
5228                          */
5229                         vsi->msix_intr = 0;
5230                         vsi->nb_msix = 1;
5231                 } else {
5232                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5233                                                   RTE_MIN(vsi->nb_qps,
5234                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5235                         if (ret < 0) {
5236                                 PMD_DRV_LOG(ERR,
5237                                             "VSI MAIN %d get heap failed %d",
5238                                             vsi->seid, ret);
5239                                 goto fail_queue_alloc;
5240                         }
5241                         vsi->msix_intr = ret;
5242                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5243                                                RTE_MAX_RXTX_INTR_VEC_ID);
5244                 }
5245         } else if (type != I40E_VSI_SRIOV) {
5246                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5247                 if (ret < 0) {
5248                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5249                         goto fail_queue_alloc;
5250                 }
5251                 vsi->msix_intr = ret;
5252                 vsi->nb_msix = 1;
5253         } else {
5254                 vsi->msix_intr = 0;
5255                 vsi->nb_msix = 0;
5256         }
5257
5258         /* Add VSI */
5259         if (type == I40E_VSI_MAIN) {
5260                 /* For main VSI, no need to add since it's default one */
5261                 vsi->uplink_seid = pf->mac_seid;
5262                 vsi->seid = pf->main_vsi_seid;
5263                 /* Bind queues to a specific MSI-X interrupt */
5264                 /**
5265                  * At least 2 interrupts are needed: one for the misc causes,
5266                  * enabled from the OS side, and another for binding queues
5267                  * to the interrupt, enabled from the device side only.
5268                  */
5269
5270                 /* Get default VSI parameters from hardware */
5271                 memset(&ctxt, 0, sizeof(ctxt));
5272                 ctxt.seid = vsi->seid;
5273                 ctxt.pf_num = hw->pf_id;
5274                 ctxt.uplink_seid = vsi->uplink_seid;
5275                 ctxt.vf_num = 0;
5276                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5277                 if (ret != I40E_SUCCESS) {
5278                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5279                         goto fail_msix_alloc;
5280                 }
5281                 rte_memcpy(&vsi->info, &ctxt.info,
5282                         sizeof(struct i40e_aqc_vsi_properties_data));
5283                 vsi->vsi_id = ctxt.vsi_number;
5284                 vsi->info.valid_sections = 0;
5285
5286                 /* Configure TCs; enable TC0 only */
5287                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5288                         I40E_SUCCESS) {
5289                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5290                         goto fail_msix_alloc;
5291                 }
5292
5293                 /* TC, queue mapping */
5294                 memset(&ctxt, 0, sizeof(ctxt));
5295                 vsi->info.valid_sections |=
5296                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5297                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5298                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5299                 rte_memcpy(&ctxt.info, &vsi->info,
5300                         sizeof(struct i40e_aqc_vsi_properties_data));
5301                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5302                                                 I40E_DEFAULT_TCMAP);
5303                 if (ret != I40E_SUCCESS) {
5304                         PMD_DRV_LOG(ERR,
5305                                 "Failed to configure TC queue mapping");
5306                         goto fail_msix_alloc;
5307                 }
5308                 ctxt.seid = vsi->seid;
5309                 ctxt.pf_num = hw->pf_id;
5310                 ctxt.uplink_seid = vsi->uplink_seid;
5311                 ctxt.vf_num = 0;
5312
5313                 /* Update VSI parameters */
5314                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5315                 if (ret != I40E_SUCCESS) {
5316                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5317                         goto fail_msix_alloc;
5318                 }
5319
5320                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5321                                                 sizeof(vsi->info.tc_mapping));
5322                 rte_memcpy(&vsi->info.queue_mapping,
5323                                 &ctxt.info.queue_mapping,
5324                         sizeof(vsi->info.queue_mapping));
5325                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5326                 vsi->info.valid_sections = 0;
5327
5328                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5329                                 ETH_ADDR_LEN);
5330
5331                 /**
5332                  * Updating the default filter settings is necessary to
5333                  * prevent reception of tagged packets.
5334                  * Some old firmware configurations load a default macvlan
5335                  * filter which accepts both tagged and untagged packets.
5336                  * The update replaces it with a normal filter if needed.
5337                  * For NVM 4.2.2 or later, the update is no longer needed;
5338                  * such firmware loads the expected default macvlan filter,
5339                  * which cannot be removed.
5340                  */
5341                 i40e_update_default_filter_setting(vsi);
5342                 i40e_config_qinq(hw, vsi);
5343         } else if (type == I40E_VSI_SRIOV) {
5344                 memset(&ctxt, 0, sizeof(ctxt));
5345                 /**
5346                  * For other VSI types, uplink_seid equals the uplink VSI's
5347                  * uplink_seid since they share the same VEB.
5348                  */
5349                 if (uplink_vsi == NULL)
5350                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5351                 else
5352                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5353                 ctxt.pf_num = hw->pf_id;
5354                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5355                 ctxt.uplink_seid = vsi->uplink_seid;
5356                 ctxt.connection_type = 0x1;
5357                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5358
5359                 /* Use the VEB configuration if FW >= v5.0 */
5360                 if (hw->aq.fw_maj_ver >= 5) {
5361                         /* Configure switch ID */
5362                         ctxt.info.valid_sections |=
5363                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5364                         ctxt.info.switch_id =
5365                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5366                 }
5367
5368                 /* Configure port/vlan */
5369                 ctxt.info.valid_sections |=
5370                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5371                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5372                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5373                                                 hw->func_caps.enabled_tcmap);
5374                 if (ret != I40E_SUCCESS) {
5375                         PMD_DRV_LOG(ERR,
5376                                 "Failed to configure TC queue mapping");
5377                         goto fail_msix_alloc;
5378                 }
5379
5380                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5381                 ctxt.info.valid_sections |=
5382                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5383                 /**
5384                  * The VSI is not created yet, so only configure the
5385                  * parameters here; the VSI is added below.
5386                  */
5387
5388                 i40e_config_qinq(hw, vsi);
5389         } else if (type == I40E_VSI_VMDQ2) {
5390                 memset(&ctxt, 0, sizeof(ctxt));
5391                 /*
5392                  * For other VSI types, uplink_seid equals the uplink VSI's
5393                  * uplink_seid since they share the same VEB.
5394                  */
5395                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5396                 ctxt.pf_num = hw->pf_id;
5397                 ctxt.vf_num = 0;
5398                 ctxt.uplink_seid = vsi->uplink_seid;
5399                 ctxt.connection_type = 0x1;
5400                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5401
5402                 ctxt.info.valid_sections |=
5403                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5404                 /* user_param carries a flag to enable loopback */
5405                 if (user_param) {
5406                         ctxt.info.switch_id =
5407                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5408                         ctxt.info.switch_id |=
5409                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5410                 }
5411
5412                 /* Configure port/vlan */
5413                 ctxt.info.valid_sections |=
5414                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5415                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5416                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5417                                                 I40E_DEFAULT_TCMAP);
5418                 if (ret != I40E_SUCCESS) {
5419                         PMD_DRV_LOG(ERR,
5420                                 "Failed to configure TC queue mapping");
5421                         goto fail_msix_alloc;
5422                 }
5423                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5424                 ctxt.info.valid_sections |=
5425                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5426         } else if (type == I40E_VSI_FDIR) {
5427                 memset(&ctxt, 0, sizeof(ctxt));
5428                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5429                 ctxt.pf_num = hw->pf_id;
5430                 ctxt.vf_num = 0;
5431                 ctxt.uplink_seid = vsi->uplink_seid;
5432                 ctxt.connection_type = 0x1;     /* regular data port */
5433                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5434                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5435                                                 I40E_DEFAULT_TCMAP);
5436                 if (ret != I40E_SUCCESS) {
5437                         PMD_DRV_LOG(ERR,
5438                                 "Failed to configure TC queue mapping.");
5439                         goto fail_msix_alloc;
5440                 }
5441                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5442                 ctxt.info.valid_sections |=
5443                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5444         } else {
5445                 PMD_DRV_LOG(ERR, "VSI: unsupported VSI type");
5446                 goto fail_msix_alloc;
5447         }
5448
5449         if (vsi->type != I40E_VSI_MAIN) {
5450                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5451                 if (ret != I40E_SUCCESS) {
5452                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5453                                     hw->aq.asq_last_status);
5454                         goto fail_msix_alloc;
5455                 }
5456                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5457                 vsi->info.valid_sections = 0;
5458                 vsi->seid = ctxt.seid;
5459                 vsi->vsi_id = ctxt.vsi_number;
5460                 vsi->sib_vsi_list.vsi = vsi;
5461                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5462                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5463                                           &vsi->sib_vsi_list, list);
5464                 } else {
5465                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5466                                           &vsi->sib_vsi_list, list);
5467                 }
5468         }
5469
5470         /* MAC/VLAN configuration */
5471         rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5472         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5473
5474         ret = i40e_vsi_add_mac(vsi, &filter);
5475         if (ret != I40E_SUCCESS) {
5476                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5477                 goto fail_msix_alloc;
5478         }
5479
5480         /* Get VSI BW information */
5481         i40e_vsi_get_bw_config(vsi);
5482         return vsi;
5483 fail_msix_alloc:
5484         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5485 fail_queue_alloc:
5486         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5487 fail_mem:
5488         rte_free(vsi);
5489         return NULL;
5490 }
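
/**
 * A minimal usage sketch (illustrative only, not part of the driver):
 * creating a VMDQ VSI attached to the main VSI, as i40e_vmdq_setup()
 * below does; the pf pointer is assumed to come from the adapter
 * private data, and user_param == 1 requests loopback on the VMDQ2 VSI.
 *
 *     struct i40e_vsi *vmdq_vsi;
 *
 *     vmdq_vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi, 1);
 *     if (vmdq_vsi == NULL)
 *             PMD_DRV_LOG(ERR, "VMDQ VSI creation failed");
 */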
5491
5492 /* Configure vlan filter on or off */
5493 int
5494 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5495 {
5496         int i, num;
5497         struct i40e_mac_filter *f;
5498         void *temp;
5499         struct i40e_mac_filter_info *mac_filter;
5500         enum rte_mac_filter_type desired_filter;
5501         int ret = I40E_SUCCESS;
5502
5503         if (on) {
5504                 /* Filter to match MAC and VLAN */
5505                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5506         } else {
5507                 /* Filter to match only MAC */
5508                 desired_filter = RTE_MAC_PERFECT_MATCH;
5509         }
5510
5511         num = vsi->mac_num;
5512
5513         mac_filter = rte_zmalloc("mac_filter_info_data",
5514                                  num * sizeof(*mac_filter), 0);
5515         if (mac_filter == NULL) {
5516                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5517                 return I40E_ERR_NO_MEMORY;
5518         }
5519
5520         i = 0;
5521
5522         /* Remove all existing MAC filters */
5523         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5524                 mac_filter[i] = f->mac_info;
5525                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5526                 if (ret) {
5527                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5528                                     on ? "enable" : "disable");
5529                         goto DONE;
5530                 }
5531                 i++;
5532         }
5533
5534         /* Re-add them with the desired filter type */
5535         for (i = 0; i < num; i++) {
5536                 mac_filter[i].filter_type = desired_filter;
5537                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5538                 if (ret) {
5539                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5540                                     on ? "enable" : "disable");
5541                         goto DONE;
5542                 }
5543         }
5544
5545 DONE:
5546         rte_free(mac_filter);
5547         return ret;
5548 }
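
/**
 * A hypothetical caller sketch for the helper above: enabling VLAN
 * filtering replays every MAC address already on the VSI as a MAC+VLAN
 * perfect-match filter, so it is typically combined with per-VLAN
 * updates through i40e_set_vlan_filter() further below.
 *
 *     if (i40e_vsi_config_vlan_filter(vsi, TRUE) != I40E_SUCCESS)
 *             PMD_DRV_LOG(ERR, "Failed to enable vlan filtering");
 */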
5549
5550 /* Configure vlan stripping on or off */
5551 int
5552 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5553 {
5554         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5555         struct i40e_vsi_context ctxt;
5556         uint8_t vlan_flags;
5557         int ret = I40E_SUCCESS;
5558
5559         /* Check if it is already on or off */
5560         if (vsi->info.valid_sections &
5561                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5562                 if (on) {
5563                         if ((vsi->info.port_vlan_flags &
5564                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5565                                 return 0; /* already on */
5566                 } else {
5567                         if ((vsi->info.port_vlan_flags &
5568                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5569                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
5570                                 return 0; /* already off */
5571                 }
5572         }
5573
5574         if (on)
5575                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5576         else
5577                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5578         vsi->info.valid_sections =
5579                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5580         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5581         vsi->info.port_vlan_flags |= vlan_flags;
5582         ctxt.seid = vsi->seid;
5583         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5584         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5585         if (ret)
5586                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
5587                             on ? "enable" : "disable");
5588
5589         return ret;
5590 }
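
/*
 * For reference, the EMOD field semantics assumed by the checks above
 * (summarizing the base-code definitions): EMOD_STR_BOTH is 0x0 (strip
 * the tag and copy it to the descriptor) and EMOD_NOTHING equals
 * EMOD_MASK (leave the tag in the packet), so "already on" means all
 * EMOD bits clear and "already off" means all EMOD bits set.
 */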
5591
5592 static int
5593 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5594 {
5595         struct rte_eth_dev_data *data = dev->data;
5596         int ret;
5597         int mask = 0;
5598
5599         /* Apply vlan offload setting */
5600         mask = ETH_VLAN_STRIP_MASK |
5601                ETH_VLAN_FILTER_MASK |
5602                ETH_VLAN_EXTEND_MASK;
5603         ret = i40e_vlan_offload_set(dev, mask);
5604         if (ret) {
5605                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
5606                 return ret;
5607         }
5608
5609         /* Apply pvid setting */
5610         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5611                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
5612         if (ret)
5613                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
5614
5615         return ret;
5616 }
5617
5618 static int
5619 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5620 {
5621         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5622
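        /* Per the base-code prototype, the arguments map to:
         * bad_frame_vsi = vsi->seid, save_bad_pac = 0 (FALSE),
         * pad_short_pac = 1 (TRUE), double_vlan = on.
         */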
5623         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5624 }
5625
5626 static int
5627 i40e_update_flow_control(struct i40e_hw *hw)
5628 {
5629 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5630         struct i40e_link_status link_status;
5631         uint32_t rxfc = 0, txfc = 0, reg;
5632         uint8_t an_info;
5633         int ret;
5634
5635         memset(&link_status, 0, sizeof(link_status));
5636         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5637         if (ret != I40E_SUCCESS) {
5638                 PMD_DRV_LOG(ERR, "Failed to get link status information");
5639                 goto write_reg; /* Disable flow control */
5640         }
5641
5642         an_info = hw->phy.link_info.an_info;
5643         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5644                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5645                 ret = I40E_ERR_NOT_READY;
5646                 goto write_reg; /* Disable flow control */
5647         }
5648         /**
5649          * If link auto-negotiation is enabled, flow control needs to
5650          * be configured according to it.
5651          */
5652         switch (an_info & I40E_LINK_PAUSE_RXTX) {
5653         case I40E_LINK_PAUSE_RXTX:
5654                 rxfc = 1;
5655                 txfc = 1;
5656                 hw->fc.current_mode = I40E_FC_FULL;
5657                 break;
5658         case I40E_AQ_LINK_PAUSE_RX:
5659                 rxfc = 1;
5660                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
5661                 break;
5662         case I40E_AQ_LINK_PAUSE_TX:
5663                 txfc = 1;
5664                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
5665                 break;
5666         default:
5667                 hw->fc.current_mode = I40E_FC_NONE;
5668                 break;
5669         }
5670
5671 write_reg:
5672         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
5673                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
5674         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
5675         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
5676         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
5677         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
5678
5679         return ret;
5680 }
5681
5682 /* PF setup */
5683 static int
5684 i40e_pf_setup(struct i40e_pf *pf)
5685 {
5686         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5687         struct i40e_filter_control_settings settings;
5688         struct i40e_vsi *vsi;
5689         int ret;
5690
5691         /* Clear all stats counters */
5692         pf->offset_loaded = FALSE;
5693         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
5694         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
5695         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
5696         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
5697
5698         ret = i40e_pf_get_switch_config(pf);
5699         if (ret != I40E_SUCCESS) {
5700                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
5701                 return ret;
5702         }
5703         if (pf->flags & I40E_FLAG_FDIR) {
5704                 /* Allocate queues first so that FDIR uses queue pair 0 */
5705                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
5706                 if (ret != I40E_FDIR_QUEUE_ID) {
5707                         PMD_DRV_LOG(ERR,
5708                                 "queue allocation fails for FDIR: ret =%d",
5709                                 ret);
5710                         pf->flags &= ~I40E_FLAG_FDIR;
5711                 }
5712         }
5713         /* Main VSI setup */
5714         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
5715         if (!vsi) {
5716                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
5717                 return I40E_ERR_NOT_READY;
5718         }
5719         pf->main_vsi = vsi;
5720
5721         /* Configure filter control */
5722         memset(&settings, 0, sizeof(settings));
5723         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
5724                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
5725         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
5726                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
5727         else {
5728                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
5729                         hw->func_caps.rss_table_size);
5730                 return I40E_ERR_PARAM;
5731         }
5732         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
5733                 hw->func_caps.rss_table_size);
5734         pf->hash_lut_size = hw->func_caps.rss_table_size;
5735
5736         /* Enable ethtype and macvlan filters */
5737         settings.enable_ethtype = TRUE;
5738         settings.enable_macvlan = TRUE;
5739         ret = i40e_set_filter_control(hw, &settings);
5740         if (ret)
5741                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
5742                                                                 ret);
5743
5744         /* Update flow control according to the auto negotiation */
5745         i40e_update_flow_control(hw);
5746
5747         return I40E_SUCCESS;
5748 }
5749
5750 int
5751 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5752 {
5753         uint32_t reg;
5754         uint16_t j;
5755
5756         /**
5757          * Set or clear the TX Queue Disable flags,
5758          * as required by hardware.
5759          */
5760         i40e_pre_tx_queue_cfg(hw, q_idx, on);
5761         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
5762
5763         /* Wait until the request is finished */
5764         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5765                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5766                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5767                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5768                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
5769                                                         & 0x1))) {
5770                         break;
5771                 }
5772         }
5773         if (on) {
5774                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
5775                         return I40E_SUCCESS; /* already on, skip next steps */
5776
5777                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
5778                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
5779         } else {
5780                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5781                         return I40E_SUCCESS; /* already off, skip next steps */
5782                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
5783         }
5784         /* Write the register */
5785         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
5786         /* Check the result */
5787         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5788                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5789                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5790                 if (on) {
5791                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5792                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
5793                                 break;
5794                 } else {
5795                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5796                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5797                                 break;
5798                 }
5799         }
5800         /* Check for timeout */
5801         if (j >= I40E_CHK_Q_ENA_COUNT) {
5802                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5803                             (on ? "enable" : "disable"), q_idx);
5804                 return I40E_ERR_TIMEOUT;
5805         }
5806
5807         return I40E_SUCCESS;
5808 }
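
/**
 * Handshake note and a usage sketch (hypothetical caller): the wait
 * loops above exit once the QENA_REQ bit equals the QENA_STAT bit,
 * i.e. no enable/disable request is still pending for the queue.
 *
 *     int err = i40e_switch_tx_queue(hw, q_idx, TRUE);
 *     if (err == I40E_ERR_TIMEOUT)
 *             PMD_DRV_LOG(ERR, "QTX_ENA handshake timed out");
 */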
5809
5810 /* Switch on or off the tx queues */
5811 static int
5812 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5813 {
5814         struct rte_eth_dev_data *dev_data = pf->dev_data;
5815         struct i40e_tx_queue *txq;
5816         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5817         uint16_t i;
5818         int ret;
5819
5820         for (i = 0; i < dev_data->nb_tx_queues; i++) {
5821                 txq = dev_data->tx_queues[i];
5822                 /* Skip the queue if it is not configured, or if it is
5823                  * flagged for deferred start when starting all queues */
5824                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5825                         continue;
5826                 if (on)
5827                         ret = i40e_dev_tx_queue_start(dev, i);
5828                 else
5829                         ret = i40e_dev_tx_queue_stop(dev, i);
5830                 if (ret != I40E_SUCCESS)
5831                         return ret;
5832         }
5833
5834         return I40E_SUCCESS;
5835 }
5836
5837 int
5838 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5839 {
5840         uint32_t reg;
5841         uint16_t j;
5842
5843         /* Wait until the request is finished */
5844         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5845                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5846                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5847                 if (!(((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5848                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1)))
5849                         break;
5850         }
5851
5852         if (on) {
5853                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5854                         return I40E_SUCCESS; /* Already on, skip next steps */
5855                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5856         } else {
5857                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5858                         return I40E_SUCCESS; /* Already off, skip next steps */
5859                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5860         }
5861
5862         /* Write the register */
5863         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5864         /* Check the result */
5865         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5866                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5867                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5868                 if (on) {
5869                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5870                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
5871                                 break;
5872                 } else {
5873                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5874                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5875                                 break;
5876                 }
5877         }
5878
5879         /* Check for timeout */
5880         if (j >= I40E_CHK_Q_ENA_COUNT) {
5881                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
5882                             (on ? "enable" : "disable"), q_idx);
5883                 return I40E_ERR_TIMEOUT;
5884         }
5885
5886         return I40E_SUCCESS;
5887 }
5888 /* Switch on or off the rx queues */
5889 static int
5890 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
5891 {
5892         struct rte_eth_dev_data *dev_data = pf->dev_data;
5893         struct i40e_rx_queue *rxq;
5894         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5895         uint16_t i;
5896         int ret;
5897
5898         for (i = 0; i < dev_data->nb_rx_queues; i++) {
5899                 rxq = dev_data->rx_queues[i];
5900                 /* Skip the queue if it is not configured, or if it is
5901                  * flagged for deferred start when starting all queues */
5902                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
5903                         continue;
5904                 if (on)
5905                         ret = i40e_dev_rx_queue_start(dev, i);
5906                 else
5907                         ret = i40e_dev_rx_queue_stop(dev, i);
5908                 if (ret != I40E_SUCCESS)
5909                         return ret;
5910         }
5911
5912         return I40E_SUCCESS;
5913 }
5914
5915 /* Switch on or off all the rx/tx queues */
5916 int
5917 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
5918 {
5919         int ret;
5920
5921         if (on) {
5922                 /* enable rx queues before enabling tx queues */
5923                 ret = i40e_dev_switch_rx_queues(pf, on);
5924                 if (ret) {
5925                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
5926                         return ret;
5927                 }
5928                 ret = i40e_dev_switch_tx_queues(pf, on);
5929         } else {
5930                 /* Stop tx queues before stopping rx queues */
5931                 ret = i40e_dev_switch_tx_queues(pf, on);
5932                 if (ret) {
5933                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
5934                         return ret;
5935                 }
5936                 ret = i40e_dev_switch_rx_queues(pf, on);
5937         }
5938
5939         return ret;
5940 }
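
/**
 * Usage sketch (hypothetical dev_start/dev_stop call sites), showing
 * the ordering the function enforces:
 *
 *     ret = i40e_dev_switch_queues(pf, TRUE);   start: RX, then TX
 *     ret = i40e_dev_switch_queues(pf, FALSE);  stop: TX, then RX
 */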
5941
5942 /* Initialize VSI for TX */
5943 static int
5944 i40e_dev_tx_init(struct i40e_pf *pf)
5945 {
5946         struct rte_eth_dev_data *data = pf->dev_data;
5947         uint16_t i;
5948         uint32_t ret = I40E_SUCCESS;
5949         struct i40e_tx_queue *txq;
5950
5951         for (i = 0; i < data->nb_tx_queues; i++) {
5952                 txq = data->tx_queues[i];
5953                 if (!txq || !txq->q_set)
5954                         continue;
5955                 ret = i40e_tx_queue_init(txq);
5956                 if (ret != I40E_SUCCESS)
5957                         break;
5958         }
5959         if (ret == I40E_SUCCESS)
5960                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
5961                                      ->eth_dev);
5962
5963         return ret;
5964 }
5965
5966 /* Initialize VSI for RX */
5967 static int
5968 i40e_dev_rx_init(struct i40e_pf *pf)
5969 {
5970         struct rte_eth_dev_data *data = pf->dev_data;
5971         int ret = I40E_SUCCESS;
5972         uint16_t i;
5973         struct i40e_rx_queue *rxq;
5974
5975         i40e_pf_config_mq_rx(pf);
5976         for (i = 0; i < data->nb_rx_queues; i++) {
5977                 rxq = data->rx_queues[i];
5978                 if (!rxq || !rxq->q_set)
5979                         continue;
5980
5981                 ret = i40e_rx_queue_init(rxq);
5982                 if (ret != I40E_SUCCESS) {
5983                         PMD_DRV_LOG(ERR,
5984                                 "Failed to do RX queue initialization");
5985                         break;
5986                 }
5987         }
5988         if (ret == I40E_SUCCESS)
5989                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
5990                                      ->eth_dev);
5991
5992         return ret;
5993 }
5994
5995 static int
5996 i40e_dev_rxtx_init(struct i40e_pf *pf)
5997 {
5998         int err;
5999
6000         err = i40e_dev_tx_init(pf);
6001         if (err) {
6002                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6003                 return err;
6004         }
6005         err = i40e_dev_rx_init(pf);
6006         if (err) {
6007                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6008                 return err;
6009         }
6010
6011         return err;
6012 }
6013
6014 static int
6015 i40e_vmdq_setup(struct rte_eth_dev *dev)
6016 {
6017         struct rte_eth_conf *conf = &dev->data->dev_conf;
6018         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6019         int i, err, conf_vsis, j, loop;
6020         struct i40e_vsi *vsi;
6021         struct i40e_vmdq_info *vmdq_info;
6022         struct rte_eth_vmdq_rx_conf *vmdq_conf;
6023         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6024
6025         /*
6026          * Disable interrupts to avoid messages from VFs. This also
6027          * avoids race conditions in VSI creation/destruction.
6028          */
6029         i40e_pf_disable_irq0(hw);
6030
6031         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6032                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6033                 return -ENOTSUP;
6034         }
6035
6036         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6037         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6038                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
6039                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6040                         pf->max_nb_vmdq_vsi);
6041                 return -ENOTSUP;
6042         }
6043
6044         if (pf->vmdq != NULL) {
6045                 PMD_INIT_LOG(INFO, "VMDQ already configured");
6046                 return 0;
6047         }
6048
6049         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6050                                 sizeof(*vmdq_info) * conf_vsis, 0);
6051
6052         if (pf->vmdq == NULL) {
6053                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6054                 return -ENOMEM;
6055         }
6056
6057         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6058
6059         /* Create VMDQ VSI */
6060         for (i = 0; i < conf_vsis; i++) {
6061                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6062                                 vmdq_conf->enable_loop_back);
6063                 if (vsi == NULL) {
6064                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6065                         err = -1;
6066                         goto err_vsi_setup;
6067                 }
6068                 vmdq_info = &pf->vmdq[i];
6069                 vmdq_info->pf = pf;
6070                 vmdq_info->vsi = vsi;
6071         }
6072         pf->nb_cfg_vmdq_vsi = conf_vsis;
6073
6074         /* Configure VLANs */
6075         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6076         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6077                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6078                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6079                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6080                                         vmdq_conf->pool_map[i].vlan_id, j);
6081
6082                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6083                                                 vmdq_conf->pool_map[i].vlan_id);
6084                                 if (err) {
6085                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6086                                         err = -1;
6087                                         goto err_vsi_setup;
6088                                 }
6089                         }
6090                 }
6091         }
6092
6093         i40e_pf_enable_irq0(hw);
6094
6095         return 0;
6096
6097 err_vsi_setup:
6098         for (i = 0; i < conf_vsis; i++) {
6099                 if (pf->vmdq[i].vsi == NULL)
6100                         break;
6101                 i40e_vsi_release(pf->vmdq[i].vsi);
6102         }
6103
6104         rte_free(pf->vmdq);
6105         pf->vmdq = NULL;
6106         i40e_pf_enable_irq0(hw);
6107         return err;
6108 }
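
/*
 * Pool-map example for the VLAN loop above (hypothetical configuration):
 * with pool_map[0].vlan_id = 100 and pool_map[0].pools = 0x5, VLAN 100
 * is added to VMDQ VSIs 0 and 2, since bits 0 and 2 are set in the
 * pool mask.
 */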
6109
6110 static void
6111 i40e_stat_update_32(struct i40e_hw *hw,
6112                    uint32_t reg,
6113                    bool offset_loaded,
6114                    uint64_t *offset,
6115                    uint64_t *stat)
6116 {
6117         uint64_t new_data;
6118
6119         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6120         if (!offset_loaded)
6121                 *offset = new_data;
6122
6123         if (new_data >= *offset)
6124                 *stat = (uint64_t)(new_data - *offset);
6125         else
6126                 *stat = (uint64_t)((new_data +
6127                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6128 }
6129
6130 static void
6131 i40e_stat_update_48(struct i40e_hw *hw,
6132                    uint32_t hireg,
6133                    uint32_t loreg,
6134                    bool offset_loaded,
6135                    uint64_t *offset,
6136                    uint64_t *stat)
6137 {
6138         uint64_t new_data;
6139
6140         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6141         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6142                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6143
6144         if (!offset_loaded)
6145                 *offset = new_data;
6146
6147         if (new_data >= *offset)
6148                 *stat = new_data - *offset;
6149         else
6150                 *stat = (uint64_t)((new_data +
6151                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6152
6153         *stat &= I40E_48_BIT_MASK;
6154 }
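
/*
 * Worked example of the 48-bit rollover handling above (made-up values):
 * with *offset == 0xFFFFFFFFF000 and a wrapped reading of
 * new_data == 0x100, the else branch computes
 *
 *     (0x100 + (1ULL << 48)) - 0xFFFFFFFFF000 = 0x1100
 *
 * and the final 48-bit mask keeps the result in range, so the delta is
 * counted correctly across the wrap.
 */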
6155
6156 /* Disable IRQ0 */
6157 void
6158 i40e_pf_disable_irq0(struct i40e_hw *hw)
6159 {
6160         /* Disable all interrupt types */
6161         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6162                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6163         I40E_WRITE_FLUSH(hw);
6164 }
6165
6166 /* Enable IRQ0 */
6167 void
6168 i40e_pf_enable_irq0(struct i40e_hw *hw)
6169 {
6170         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6171                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6172                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6173                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6174         I40E_WRITE_FLUSH(hw);
6175 }
6176
6177 static void
6178 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6179 {
6180         /* read pending request and disable first */
6181         i40e_pf_disable_irq0(hw);
6182         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6183         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6184                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6185
6186         if (no_queue)
6187                 /* Link no queues with irq0 */
6188                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6189                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6190 }
6191
6192 static void
6193 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6194 {
6195         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6196         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6197         int i;
6198         uint16_t abs_vf_id;
6199         uint32_t index, offset, val;
6200
6201         if (!pf->vfs)
6202                 return;
6203         /**
6204          * Try to find which VF triggered a reset. Use the absolute VF id
6205          * for access, since the register is a global one.
6206          */
6207         for (i = 0; i < pf->vf_num; i++) {
6208                 abs_vf_id = hw->func_caps.vf_base_id + i;
6209                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6210                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6211                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6212                 /* VFR event occurred */
6213                 if (val & (0x1 << offset)) {
6214                         int ret;
6215
6216                         /* Clear the event first */
6217                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6218                                                         (0x1 << offset));
6219                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6220                         /**
6221                          * Only notify that a VF reset event occurred;
6222                          * don't trigger another SW reset.
6223                          */
6224                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6225                         if (ret != I40E_SUCCESS)
6226                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6227                 }
6228         }
6229 }
6230
6231 static void
6232 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6233 {
6234         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6235         int i;
6236
6237         for (i = 0; i < pf->vf_num; i++)
6238                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6239 }
6240
6241 static void
6242 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6243 {
6244         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6245         struct i40e_arq_event_info info;
6246         uint16_t pending, opcode;
6247         int ret;
6248
6249         info.buf_len = I40E_AQ_BUF_SZ;
6250         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6251         if (!info.msg_buf) {
6252                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6253                 return;
6254         }
6255
6256         pending = 1;
6257         while (pending) {
6258                 ret = i40e_clean_arq_element(hw, &info, &pending);
6259
6260                 if (ret != I40E_SUCCESS) {
6261                         PMD_DRV_LOG(INFO,
6262                                 "Failed to read msg from AdminQ, aq_err: %u",
6263                                 hw->aq.asq_last_status);
6264                         break;
6265                 }
6266                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6267
6268                 switch (opcode) {
6269                 case i40e_aqc_opc_send_msg_to_pf:
6270                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6271                         i40e_pf_host_handle_vf_msg(dev,
6272                                         rte_le_to_cpu_16(info.desc.retval),
6273                                         rte_le_to_cpu_32(info.desc.cookie_high),
6274                                         rte_le_to_cpu_32(info.desc.cookie_low),
6275                                         info.msg_buf,
6276                                         info.msg_len);
6277                         break;
6278                 case i40e_aqc_opc_get_link_status:
6279                         ret = i40e_dev_link_update(dev, 0);
6280                         if (!ret)
6281                                 _rte_eth_dev_callback_process(dev,
6282                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6283                         break;
6284                 default:
6285                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6286                                     opcode);
6287                         break;
6288                 }
6289         }
6290         rte_free(info.msg_buf);
6291 }
6292
6293 /**
6294  * Interrupt handler triggered by the NIC for handling
6295  * a specific interrupt.
6296  *
6297  * @param param
6298  *  The address of the parameter (struct rte_eth_dev *)
6299  *  registered before.
6300  *
6301  * @return
6302  *  void
6303  */
6305 static void
6306 i40e_dev_interrupt_handler(void *param)
6307 {
6308         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6309         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6310         uint32_t icr0;
6311
6312         /* Disable interrupt */
6313         i40e_pf_disable_irq0(hw);
6314
6315         /* read out interrupt causes */
6316         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6317
6318         /* No interrupt event indicated */
6319         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6320                 PMD_DRV_LOG(INFO, "No interrupt event");
6321                 goto done;
6322         }
6323         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6324                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6325         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6326                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6327         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6328                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6329         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6330                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6331         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6332                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6333         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6334                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6335         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6336                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6337
6338         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6339                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6340                 i40e_dev_handle_vfr_event(dev);
6341         }
6342         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6343                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6344                 i40e_dev_handle_aq_msg(dev);
6345         }
6346
6347 done:
6348         /* Enable interrupt */
6349         i40e_pf_enable_irq0(hw);
6350         rte_intr_enable(dev->intr_handle);
6351 }
6352
6353 int
6354 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6355                          struct i40e_macvlan_filter *filter,
6356                          int total)
6357 {
6358         int ele_num, ele_buff_size;
6359         int num, actual_num, i;
6360         uint16_t flags;
6361         int ret = I40E_SUCCESS;
6362         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6363         struct i40e_aqc_add_macvlan_element_data *req_list;
6364
6365         if (filter == NULL || total == 0)
6366                 return I40E_ERR_PARAM;
6367         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6368         ele_buff_size = hw->aq.asq_buf_size;
6369
6370         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6371         if (req_list == NULL) {
6372                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6373                 return I40E_ERR_NO_MEMORY;
6374         }
6375
6376         num = 0;
6377         do {
6378                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6379                 memset(req_list, 0, ele_buff_size);
6380
6381                 for (i = 0; i < actual_num; i++) {
6382                         rte_memcpy(req_list[i].mac_addr,
6383                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6384                         req_list[i].vlan_tag =
6385                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6386
6387                         switch (filter[num + i].filter_type) {
6388                         case RTE_MAC_PERFECT_MATCH:
6389                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6390                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6391                                 break;
6392                         case RTE_MACVLAN_PERFECT_MATCH:
6393                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6394                                 break;
6395                         case RTE_MAC_HASH_MATCH:
6396                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6397                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6398                                 break;
6399                         case RTE_MACVLAN_HASH_MATCH:
6400                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6401                                 break;
6402                         default:
6403                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
6404                                 ret = I40E_ERR_PARAM;
6405                                 goto DONE;
6406                         }
6407
6408                         req_list[i].queue_number = 0;
6409
6410                         req_list[i].flags = rte_cpu_to_le_16(flags);
6411                 }
6412
6413                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6414                                                 actual_num, NULL);
6415                 if (ret != I40E_SUCCESS) {
6416                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6417                         goto DONE;
6418                 }
6419                 num += actual_num;
6420         } while (num < total);
6421
6422 DONE:
6423         rte_free(req_list);
6424         return ret;
6425 }
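
/*
 * A quick numeric sketch of the batching above, assuming the usual 4 KB
 * admin-queue buffer and a 16-byte add-macvlan element: ele_num is
 * 4096 / 16 = 256, so adding e.g. 600 filters issues three AQ commands
 * of 256, 256 and 88 elements.
 */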
6426
6427 int
6428 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6429                             struct i40e_macvlan_filter *filter,
6430                             int total)
6431 {
6432         int ele_num, ele_buff_size;
6433         int num, actual_num, i;
6434         uint16_t flags;
6435         int ret = I40E_SUCCESS;
6436         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6437         struct i40e_aqc_remove_macvlan_element_data *req_list;
6438
6439         if (filter == NULL || total == 0)
6440                 return I40E_ERR_PARAM;
6441
6442         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6443         ele_buff_size = hw->aq.asq_buf_size;
6444
6445         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6446         if (req_list == NULL) {
6447                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6448                 return I40E_ERR_NO_MEMORY;
6449         }
6450
6451         num = 0;
6452         do {
6453                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6454                 memset(req_list, 0, ele_buff_size);
6455
6456                 for (i = 0; i < actual_num; i++) {
6457                         rte_memcpy(req_list[i].mac_addr,
6458                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6459                         req_list[i].vlan_tag =
6460                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6461
6462                         switch (filter[num + i].filter_type) {
6463                         case RTE_MAC_PERFECT_MATCH:
6464                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6465                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6466                                 break;
6467                         case RTE_MACVLAN_PERFECT_MATCH:
6468                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6469                                 break;
6470                         case RTE_MAC_HASH_MATCH:
6471                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6472                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6473                                 break;
6474                         case RTE_MACVLAN_HASH_MATCH:
6475                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6476                                 break;
6477                         default:
6478                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6479                                 ret = I40E_ERR_PARAM;
6480                                 goto DONE;
6481                         }
6482                         req_list[i].flags = rte_cpu_to_le_16(flags);
6483                 }
6484
6485                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6486                                                 actual_num, NULL);
6487                 if (ret != I40E_SUCCESS) {
6488                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6489                         goto DONE;
6490                 }
6491                 num += actual_num;
6492         } while (num < total);
6493
6494 DONE:
6495         rte_free(req_list);
6496         return ret;
6497 }
6498
6499 /* Find a specific MAC filter */
6500 static struct i40e_mac_filter *
6501 i40e_find_mac_filter(struct i40e_vsi *vsi,
6502                          struct ether_addr *macaddr)
6503 {
6504         struct i40e_mac_filter *f;
6505
6506         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6507                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6508                         return f;
6509         }
6510
6511         return NULL;
6512 }
6513
6514 static bool
6515 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6516                          uint16_t vlan_id)
6517 {
6518         uint32_t vid_idx, vid_bit;
6519
6520         if (vlan_id > ETH_VLAN_ID_MAX)
6521                 return 0;
6522
6523         vid_idx = I40E_VFTA_IDX(vlan_id);
6524         vid_bit = I40E_VFTA_BIT(vlan_id);
6525
6526         if (vsi->vfta[vid_idx] & vid_bit)
6527                 return 1;
6528         else
6529                 return 0;
6530 }
6531
6532 static void
6533 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6534                        uint16_t vlan_id, bool on)
6535 {
6536         uint32_t vid_idx, vid_bit;
6537
6538         vid_idx = I40E_VFTA_IDX(vlan_id);
6539         vid_bit = I40E_VFTA_BIT(vlan_id);
6540
6541         if (on)
6542                 vsi->vfta[vid_idx] |= vid_bit;
6543         else
6544                 vsi->vfta[vid_idx] &= ~vid_bit;
6545 }
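/*
 * Illustrative note (not part of the driver): the vfta is a bitmap of
 * all 4096 VLAN IDs stored as 32-bit words, so a VLAN ID maps to a word
 * index plus a bit within that word. Assuming the usual definitions of
 * I40E_VFTA_IDX (vlan_id >> 5) and I40E_VFTA_BIT (1 << (vlan_id & 0x1F)),
 * e.g. vlan_id 100 lands in vfta[3], bit 4, so the membership tests and
 * updates above are a single AND/OR against one word.
 */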
6546
6547 void
6548 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6549                      uint16_t vlan_id, bool on)
6550 {
6551         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6552         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6553         int ret;
6554
6555         if (vlan_id > ETH_VLAN_ID_MAX)
6556                 return;
6557
6558         i40e_store_vlan_filter(vsi, vlan_id, on);
6559
6560         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6561                 return;
6562
6563         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6564
6565         if (on) {
6566                 ret = i40e_aq_add_vlan(hw, vsi->seid,
6567                                        &vlan_data, 1, NULL);
6568                 if (ret != I40E_SUCCESS)
6569                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6570         } else {
6571                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
6572                                           &vlan_data, 1, NULL);
6573                 if (ret != I40E_SUCCESS)
6574                         PMD_DRV_LOG(ERR,
6575                                     "Failed to remove vlan filter");
6576         }
6577 }
6578
6579 /**
6580  * Find all vlans configured for a specific mac addr;
6581  * fill mv_f with one entry per vlan found.
6582  */
6583 int
6584 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6585                            struct i40e_macvlan_filter *mv_f,
6586                            int num, struct ether_addr *addr)
6587 {
6588         int i;
6589         uint32_t j, k;
6590
6591         /**
6592          * Scan the vfta bitmap directly rather than calling
6593          * i40e_find_vlan_filter() per vlan, to reduce loop time.
6594          */
6595         if (num < vsi->vlan_num)
6596                 return I40E_ERR_PARAM;
6597
6598         i = 0;
6599         for (j = 0; j < I40E_VFTA_SIZE; j++) {
6600                 if (vsi->vfta[j]) {
6601                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6602                                 if (vsi->vfta[j] & (1 << k)) {
6603                                         if (i > num - 1) {
6604                                                 PMD_DRV_LOG(ERR,
6605                                                         "vlan number doesn't match");
6606                                                 return I40E_ERR_PARAM;
6607                                         }
6608                                         rte_memcpy(&mv_f[i].macaddr,
6609                                                         addr, ETH_ADDR_LEN);
6610                                         mv_f[i].vlan_id =
6611                                                 j * I40E_UINT32_BIT_SIZE + k;
6612                                         i++;
6613                                 }
6614                         }
6615                 }
6616         }
6617         return I40E_SUCCESS;
6618 }
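/*
 * Minimal usage sketch (illustrative only): callers size mv_f to one
 * entry per VLAN currently set on the VSI, then expand a single MAC
 * across those VLANs:
 *
 *     struct i40e_macvlan_filter *mv_f =
 *             rte_zmalloc("sketch", vsi->vlan_num * sizeof(*mv_f), 0);
 *     if (mv_f && i40e_find_all_vlan_for_mac(vsi, mv_f, vsi->vlan_num,
 *                                            &mac) == I40E_SUCCESS)
 *             ret = i40e_add_macvlan_filters(vsi, mv_f, vsi->vlan_num);
 *
 * This is the pattern followed by i40e_vsi_remove_all_macvlan_filter()
 * and i40e_vsi_add_mac() below.
 */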
6619
6620 static inline int
6621 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
6622                            struct i40e_macvlan_filter *mv_f,
6623                            int num,
6624                            uint16_t vlan)
6625 {
6626         int i = 0;
6627         struct i40e_mac_filter *f;
6628
6629         if (num < vsi->mac_num)
6630                 return I40E_ERR_PARAM;
6631
6632         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6633                 if (i > num - 1) {
6634                         PMD_DRV_LOG(ERR, "buffer number not match");
6635                         return I40E_ERR_PARAM;
6636                 }
6637                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6638                                 ETH_ADDR_LEN);
6639                 mv_f[i].vlan_id = vlan;
6640                 mv_f[i].filter_type = f->mac_info.filter_type;
6641                 i++;
6642         }
6643
6644         return I40E_SUCCESS;
6645 }
6646
6647 static int
6648 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
6649 {
6650         int i, j, num;
6651         struct i40e_mac_filter *f;
6652         struct i40e_macvlan_filter *mv_f;
6653         int ret = I40E_SUCCESS;
6654
6655         if (vsi == NULL || vsi->mac_num == 0)
6656                 return I40E_ERR_PARAM;
6657
6658         /* Case that no vlan is set */
6659         if (vsi->vlan_num == 0)
6660                 num = vsi->mac_num;
6661         else
6662                 num = vsi->mac_num * vsi->vlan_num;
6663
6664         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
6665         if (mv_f == NULL) {
6666                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6667                 return I40E_ERR_NO_MEMORY;
6668         }
6669
6670         i = 0;
6671         if (vsi->vlan_num == 0) {
6672                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6673                         rte_memcpy(&mv_f[i].macaddr,
6674                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
6675                         mv_f[i].filter_type = f->mac_info.filter_type;
6676                         mv_f[i].vlan_id = 0;
6677                         i++;
6678                 }
6679         } else {
6680                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6681                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
6682                                         vsi->vlan_num, &f->mac_info.mac_addr);
6683                         if (ret != I40E_SUCCESS)
6684                                 goto DONE;
6685                         for (j = i; j < i + vsi->vlan_num; j++)
6686                                 mv_f[j].filter_type = f->mac_info.filter_type;
6687                         i += vsi->vlan_num;
6688                 }
6689         }
6690
6691         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
6692 DONE:
6693         rte_free(mv_f);
6694
6695         return ret;
6696 }
6697
6698 int
6699 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6700 {
6701         struct i40e_macvlan_filter *mv_f;
6702         int mac_num;
6703         int ret = I40E_SUCCESS;
6704
6705         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
6706                 return I40E_ERR_PARAM;
6707
6708         /* If it's already set, just return */
6709         if (i40e_find_vlan_filter(vsi, vlan))
6710                 return I40E_SUCCESS;
6711
6712         mac_num = vsi->mac_num;
6713
6714         if (mac_num == 0) {
6715                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6716                 return I40E_ERR_PARAM;
6717         }
6718
6719         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6720
6721         if (mv_f == NULL) {
6722                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6723                 return I40E_ERR_NO_MEMORY;
6724         }
6725
6726         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6727
6728         if (ret != I40E_SUCCESS)
6729                 goto DONE;
6730
6731         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6732
6733         if (ret != I40E_SUCCESS)
6734                 goto DONE;
6735
6736         i40e_set_vlan_filter(vsi, vlan, 1);
6737
6738         vsi->vlan_num++;
6739         ret = I40E_SUCCESS;
6740 DONE:
6741         rte_free(mv_f);
6742         return ret;
6743 }
6744
6745 int
6746 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6747 {
6748         struct i40e_macvlan_filter *mv_f;
6749         int mac_num;
6750         int ret = I40E_SUCCESS;
6751
6752         /**
6753          * Vlan 0 is the generic filter for untagged packets
6754          * and can't be removed.
6755          */
6756         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
6757                 return I40E_ERR_PARAM;
6758
6759         /* If it can't be found, just return */
6760         if (!i40e_find_vlan_filter(vsi, vlan))
6761                 return I40E_ERR_PARAM;
6762
6763         mac_num = vsi->mac_num;
6764
6765         if (mac_num == 0) {
6766                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6767                 return I40E_ERR_PARAM;
6768         }
6769
6770         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6771
6772         if (mv_f == NULL) {
6773                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6774                 return I40E_ERR_NO_MEMORY;
6775         }
6776
6777         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6778
6779         if (ret != I40E_SUCCESS)
6780                 goto DONE;
6781
6782         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6783
6784         if (ret != I40E_SUCCESS)
6785                 goto DONE;
6786
6787         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
6788         if (vsi->vlan_num == 1) {
6789                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6790                 if (ret != I40E_SUCCESS)
6791                         goto DONE;
6792
6793                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6794                 if (ret != I40E_SUCCESS)
6795                         goto DONE;
6796         }
6797
6798         i40e_set_vlan_filter(vsi, vlan, 0);
6799
6800         vsi->vlan_num--;
6801         ret = I40E_SUCCESS;
6802 DONE:
6803         rte_free(mv_f);
6804         return ret;
6805 }
6806
6807 int
6808 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6809 {
6810         struct i40e_mac_filter *f;
6811         struct i40e_macvlan_filter *mv_f;
6812         int i, vlan_num = 0;
6813         int ret = I40E_SUCCESS;
6814
6815         /* If the MAC filter has already been configured, just return */
6816         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6817         if (f != NULL)
6818                 return I40E_SUCCESS;
6819         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6820                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6821
6822                 /**
6823                  * If vlan_num is 0, that's the first time to add mac,
6824                  * set mask for vlan_id 0.
6825                  */
6826                 if (vsi->vlan_num == 0) {
6827                         i40e_set_vlan_filter(vsi, 0, 1);
6828                         vsi->vlan_num = 1;
6829                 }
6830                 vlan_num = vsi->vlan_num;
6831         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6832                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6833                 vlan_num = 1;
6834
6835         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6836         if (mv_f == NULL) {
6837                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6838                 return I40E_ERR_NO_MEMORY;
6839         }
6840
6841         for (i = 0; i < vlan_num; i++) {
6842                 mv_f[i].filter_type = mac_filter->filter_type;
6843                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6844                                 ETH_ADDR_LEN);
6845         }
6846
6847         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6848                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6849                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6850                                         &mac_filter->mac_addr);
6851                 if (ret != I40E_SUCCESS)
6852                         goto DONE;
6853         }
6854
6855         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6856         if (ret != I40E_SUCCESS)
6857                 goto DONE;
6858
6859         /* Add the mac addr into the mac list */
6860         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6861         if (f == NULL) {
6862                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6863                 ret = I40E_ERR_NO_MEMORY;
6864                 goto DONE;
6865         }
6866         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6867                         ETH_ADDR_LEN);
6868         f->mac_info.filter_type = mac_filter->filter_type;
6869         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
6870         vsi->mac_num++;
6871
6872         ret = I40E_SUCCESS;
6873 DONE:
6874         rte_free(mv_f);
6875
6876         return ret;
6877 }
6878
6879 int
6880 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
6881 {
6882         struct i40e_mac_filter *f;
6883         struct i40e_macvlan_filter *mv_f;
6884         int i, vlan_num;
6885         enum rte_mac_filter_type filter_type;
6886         int ret = I40E_SUCCESS;
6887
6888         /* If it can't be found, return an error */
6889         f = i40e_find_mac_filter(vsi, addr);
6890         if (f == NULL)
6891                 return I40E_ERR_PARAM;
6892
6893         vlan_num = vsi->vlan_num;
6894         filter_type = f->mac_info.filter_type;
6895         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6896                 filter_type == RTE_MACVLAN_HASH_MATCH) {
6897                 if (vlan_num == 0) {
6898                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
6899                         return I40E_ERR_PARAM;
6900                 }
6901         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
6902                         filter_type == RTE_MAC_HASH_MATCH)
6903                 vlan_num = 1;
6904
6905         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6906         if (mv_f == NULL) {
6907                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6908                 return I40E_ERR_NO_MEMORY;
6909         }
6910
6911         for (i = 0; i < vlan_num; i++) {
6912                 mv_f[i].filter_type = filter_type;
6913                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6914                                 ETH_ADDR_LEN);
6915         }
6916         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6917                         filter_type == RTE_MACVLAN_HASH_MATCH) {
6918                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
6919                 if (ret != I40E_SUCCESS)
6920                         goto DONE;
6921         }
6922
6923         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
6924         if (ret != I40E_SUCCESS)
6925                 goto DONE;
6926
6927         /* Remove the mac addr from the mac list */
6928         TAILQ_REMOVE(&vsi->mac_list, f, next);
6929         rte_free(f);
6930         vsi->mac_num--;
6931
6932         ret = I40E_SUCCESS;
6933 DONE:
6934         rte_free(mv_f);
6935         return ret;
6936 }
6937
6938 /* Configure hash enable flags for RSS */
6939 uint64_t
6940 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
6941 {
6942         uint64_t hena = 0;
6943         int i;
6944
6945         if (!flags)
6946                 return hena;
6947
6948         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
6949                 if (flags & (1ULL << i))
6950                         hena |= adapter->pctypes_tbl[i];
6951         }
6952
6953         return hena;
6954 }
6955
6956 /* Parse the hash enable flags */
6957 uint64_t
6958 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
6959 {
6960         uint64_t rss_hf = 0;
6961         int i;
6962
6963         if (!flags)
6964                 return rss_hf;
6965
6966         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
6967                 if (flags & adapter->pctypes_tbl[i])
6968                         rss_hf |= (1ULL << i);
6969         }
6970         return rss_hf;
6971 }
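/*
 * Note: i40e_config_hena() and i40e_parse_hena() are inverse mappings
 * through adapter->pctypes_tbl[]. A round-trip sketch (illustrative):
 *
 *     uint64_t hena = i40e_config_hena(adapter, rss_hf);
 *     uint64_t back = i40e_parse_hena(adapter, hena);
 *
 * 'back' keeps only the flow types from rss_hf that map to a non-zero
 * pctype entry, i.e. the ones this adapter actually supports.
 */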
6972
6973 /* Disable RSS */
6974 static void
6975 i40e_pf_disable_rss(struct i40e_pf *pf)
6976 {
6977         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6978
6979         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
6980         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
6981         I40E_WRITE_FLUSH(hw);
6982 }
6983
6984 int
6985 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
6986 {
6987         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6988         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6989         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
6990                            I40E_VFQF_HKEY_MAX_INDEX :
6991                            I40E_PFQF_HKEY_MAX_INDEX;
6992         int ret = 0;
6993
6994         if (!key || key_len == 0) {
6995                 PMD_DRV_LOG(DEBUG, "No key to be configured");
6996                 return 0;
6997         } else if (key_len != (key_idx + 1) *
6998                 sizeof(uint32_t)) {
6999                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7000                 return -EINVAL;
7001         }
7002
7003         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7004                 struct i40e_aqc_get_set_rss_key_data *key_dw =
7005                         (struct i40e_aqc_get_set_rss_key_data *)key;
7006
7007                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7008                 if (ret)
7009                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
7010         } else {
7011                 uint32_t *hash_key = (uint32_t *)key;
7012                 uint16_t i;
7013
7014                 if (vsi->type == I40E_VSI_SRIOV) {
7015                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7016                                 I40E_WRITE_REG(
7017                                         hw,
7018                                         I40E_VFQF_HKEY1(i, vsi->user_param),
7019                                         hash_key[i]);
7020
7021                 } else {
7022                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7023                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7024                                                hash_key[i]);
7025                 }
7026                 I40E_WRITE_FLUSH(hw);
7027         }
7028
7029         return ret;
7030 }
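/*
 * Illustrative note: the key length accepted above is
 * (HKEY_MAX_INDEX + 1) * sizeof(uint32_t) bytes for the VSI type in
 * question; callers typically take this from the hash_key_size field
 * reported via rte_eth_dev_info_get() rather than hard-coding it.
 */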
7031
7032 static int
7033 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7034 {
7035         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7036         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7037         uint32_t reg;
7038         int ret;
7039
7040         if (!key || !key_len)
7041                 return -EINVAL;
7042
7043         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7044                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7045                         (struct i40e_aqc_get_set_rss_key_data *)key);
7046                 if (ret) {
7047                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7048                         return ret;
7049                 }
7050         } else {
7051                 uint32_t *key_dw = (uint32_t *)key;
7052                 uint16_t i;
7053
7054                 if (vsi->type == I40E_VSI_SRIOV) {
7055                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7056                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7057                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7058                         }
7059                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7060                                    sizeof(uint32_t);
7061                 } else {
7062                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7063                                 reg = I40E_PFQF_HKEY(i);
7064                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7065                         }
7066                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7067                                    sizeof(uint32_t);
7068                 }
7069         }
7070         return 0;
7071 }
7072
7073 static int
7074 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7075 {
7076         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7077         uint64_t hena;
7078         int ret;
7079
7080         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7081                                rss_conf->rss_key_len);
7082         if (ret)
7083                 return ret;
7084
7085         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7086         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7087         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7088         I40E_WRITE_FLUSH(hw);
7089
7090         return 0;
7091 }
7092
7093 static int
7094 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7095                          struct rte_eth_rss_conf *rss_conf)
7096 {
7097         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7098         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7099         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7100         uint64_t hena;
7101
7102         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7103         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7104
7105         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7106                 if (rss_hf != 0) /* Enable RSS */
7107                         return -EINVAL;
7108                 return 0; /* Nothing to do */
7109         }
7110         /* RSS enabled */
7111         if (rss_hf == 0) /* Disable RSS */
7112                 return -EINVAL;
7113
7114         return i40e_hw_rss_hash_set(pf, rss_conf);
7115 }
7116
7117 static int
7118 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7119                            struct rte_eth_rss_conf *rss_conf)
7120 {
7121         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7122         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7123         uint64_t hena;
7124
7125         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7126                          &rss_conf->rss_key_len);
7127
7128         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7129         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7130         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7131
7132         return 0;
7133 }
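/*
 * These two handlers back the rte_eth_dev_rss_hash_update() and
 * rte_eth_dev_rss_hash_conf_get() ethdev entry points. Note that
 * turning RSS on or off entirely is rejected with -EINVAL above:
 * only the hash-type set of an already-enabled configuration can be
 * changed here; enabling or disabling RSS itself is expected to
 * happen at device configuration time.
 */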
7134
7135 static int
7136 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7137 {
7138         switch (filter_type) {
7139         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7140                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7141                 break;
7142         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7143                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7144                 break;
7145         case RTE_TUNNEL_FILTER_IMAC_TENID:
7146                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7147                 break;
7148         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7149                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7150                 break;
7151         case ETH_TUNNEL_FILTER_IMAC:
7152                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7153                 break;
7154         case ETH_TUNNEL_FILTER_OIP:
7155                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7156                 break;
7157         case ETH_TUNNEL_FILTER_IIP:
7158                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7159                 break;
7160         default:
7161                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7162                 return -EINVAL;
7163         }
7164
7165         return 0;
7166 }
7167
7168 /* Convert an AQ cloud filter element into the SW tunnel filter */
7169 static int
7170 i40e_tunnel_filter_convert(
7171         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
7172         struct i40e_tunnel_filter *tunnel_filter)
7173 {
7174         ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
7175                         (struct ether_addr *)&tunnel_filter->input.outer_mac);
7176         ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
7177                         (struct ether_addr *)&tunnel_filter->input.inner_mac);
7178         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7179         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7180              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7181             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7182                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7183         else
7184                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7185         tunnel_filter->input.flags = cld_filter->element.flags;
7186         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7187         tunnel_filter->queue = cld_filter->element.queue_number;
7188         rte_memcpy(tunnel_filter->input.general_fields,
7189                    cld_filter->general_fields,
7190                    sizeof(cld_filter->general_fields));
7191
7192         return 0;
7193 }
7194
7195 /* Check if the tunnel filter already exists */
7196 struct i40e_tunnel_filter *
7197 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7198                              const struct i40e_tunnel_filter_input *input)
7199 {
7200         int ret;
7201
7202         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7203         if (ret < 0)
7204                 return NULL;
7205
7206         return tunnel_rule->hash_map[ret];
7207 }
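/*
 * The SW tunnel rules live in two synchronized structures: a hash table
 * keyed on i40e_tunnel_filter_input for O(1) lookup, and a TAILQ for
 * iteration. A minimal lookup sketch (illustrative only):
 *
 *     struct i40e_tunnel_filter *node =
 *             i40e_sw_tunnel_filter_lookup(&pf->tunnel, &input);
 *     if (node != NULL)
 *             queue = node->queue;
 */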
7208
7209 /* Add a tunnel filter into the SW list */
7210 static int
7211 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7212                              struct i40e_tunnel_filter *tunnel_filter)
7213 {
7214         struct i40e_tunnel_rule *rule = &pf->tunnel;
7215         int ret;
7216
7217         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7218         if (ret < 0) {
7219                 PMD_DRV_LOG(ERR,
7220                             "Failed to insert tunnel filter to hash table %d!",
7221                             ret);
7222                 return ret;
7223         }
7224         rule->hash_map[ret] = tunnel_filter;
7225
7226         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7227
7228         return 0;
7229 }
7230
7231 /* Delete a tunnel filter from the SW list */
7232 int
7233 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7234                           struct i40e_tunnel_filter_input *input)
7235 {
7236         struct i40e_tunnel_rule *rule = &pf->tunnel;
7237         struct i40e_tunnel_filter *tunnel_filter;
7238         int ret;
7239
7240         ret = rte_hash_del_key(rule->hash_table, input);
7241         if (ret < 0) {
7242                 PMD_DRV_LOG(ERR,
7243                             "Failed to delete tunnel filter to hash table %d!",
7244                             ret);
7245                 return ret;
7246         }
7247         tunnel_filter = rule->hash_map[ret];
7248         rule->hash_map[ret] = NULL;
7249
7250         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7251         rte_free(tunnel_filter);
7252
7253         return 0;
7254 }
7255
7256 int
7257 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7258                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
7259                         uint8_t add)
7260 {
7261         uint16_t ip_type;
7262         uint32_t ipv4_addr, ipv4_addr_le;
7263         uint8_t i, tun_type = 0;
7264         /* internal variable to convert ipv6 byte order */
7265         uint32_t convert_ipv6[4];
7266         int val, ret = 0;
7267         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7268         struct i40e_vsi *vsi = pf->main_vsi;
7269         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7270         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7271         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7272         struct i40e_tunnel_filter *tunnel, *node;
7273         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7274
7275         cld_filter = rte_zmalloc("tunnel_filter",
7276                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7277                          0);
7278
7279         if (cld_filter == NULL) {
7280                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7281                 return -ENOMEM;
7282         }
7283         pfilter = cld_filter;
7284
7285         ether_addr_copy(&tunnel_filter->outer_mac,
7286                         (struct ether_addr *)&pfilter->element.outer_mac);
7287         ether_addr_copy(&tunnel_filter->inner_mac,
7288                         (struct ether_addr *)&pfilter->element.inner_mac);
7289
7290         pfilter->element.inner_vlan =
7291                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7292         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
7293                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7294                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7295                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7296                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7297                                 &ipv4_addr_le,
7298                                 sizeof(pfilter->element.ipaddr.v4.data));
7299         } else {
7300                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7301                 for (i = 0; i < 4; i++) {
7302                         convert_ipv6[i] =
7303                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
7304                 }
7305                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7306                            &convert_ipv6,
7307                            sizeof(pfilter->element.ipaddr.v6.data));
7308         }
7309
7310         /* check the tunnel type */
7311         switch (tunnel_filter->tunnel_type) {
7312         case RTE_TUNNEL_TYPE_VXLAN:
7313                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7314                 break;
7315         case RTE_TUNNEL_TYPE_NVGRE:
7316                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7317                 break;
7318         case RTE_TUNNEL_TYPE_IP_IN_GRE:
7319                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7320                 break;
7321         default:
7322                 /* Other tunnel types are not supported. */
7323                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7324                 rte_free(cld_filter);
7325                 return -EINVAL;
7326         }
7327
7328         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7329                                        &pfilter->element.flags);
7330         if (val < 0) {
7331                 rte_free(cld_filter);
7332                 return -EINVAL;
7333         }
7334
7335         pfilter->element.flags |= rte_cpu_to_le_16(
7336                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7337                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7338         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7339         pfilter->element.queue_number =
7340                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7341
7342         /* Check if the filter is already in the SW list */
7343         memset(&check_filter, 0, sizeof(check_filter));
7344         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7345         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7346         if (add && node) {
7347                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7348                 rte_free(cld_filter);
7349                 return -EINVAL;
7350         }
7351
7352         if (!add && !node) {
7353                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7354                 rte_free(cld_filter);
7355                 return -EINVAL;
7356         }
7357
7358         if (add) {
7359                 ret = i40e_aq_add_cloud_filters(hw,
7360                                         vsi->seid, &cld_filter->element, 1);
7361                 if (ret < 0) {
7362                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7363                         rte_free(cld_filter);
7364                         return -ENOTSUP;
7365                 }
7366                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7367                 if (tunnel == NULL) {
7368                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7369                         rte_free(cld_filter);
7370                         return -ENOMEM;
7371                 }
7372
7373                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7374                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7375                 if (ret < 0)
7376                         rte_free(tunnel);
7377         } else {
7378                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7379                                                    &cld_filter->element, 1);
7380                 if (ret < 0) {
7381                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7382                         rte_free(cld_filter);
7383                         return -ENOTSUP;
7384                 }
7385                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7386         }
7387
7388         rte_free(cld_filter);
7389         return ret;
7390 }
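/*
 * Minimal add-path sketch for the function above (illustrative; the
 * field names come from struct rte_eth_tunnel_filter_conf):
 *
 *     struct rte_eth_tunnel_filter_conf conf = {
 *             .tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
 *             .filter_type = ETH_TUNNEL_FILTER_IMAC,
 *             .tenant_id = 100,
 *             .queue_id = 0,
 *     };
 *     ret = i40e_dev_tunnel_filter_set(pf, &conf, 1);
 *
 * Removal reuses the same conf with add = 0 and requires that the
 * filter is present in the SW list.
 */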
7391
7392 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7393 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7394 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7395 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7396 #define I40E_TR_GRE_KEY_MASK                    0x400
7397 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7398 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7399
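/*
 * The I40E_TR_* masks above select which tunnel key formats the
 * replaced L1 filter matches; data[10] and data[11] of the replace
 * buffer below carry the low and high byte of the combined mask,
 * respectively.
 */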
7400 static enum i40e_status_code
7401 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7402 {
7403         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7404         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7405         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7406         enum i40e_status_code status = I40E_SUCCESS;
7407
7408         if (pf->support_multi_driver) {
7409                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7410                 return I40E_NOT_SUPPORTED;
7411         }
7412
7413         memset(&filter_replace, 0,
7414                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7415         memset(&filter_replace_buf, 0,
7416                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7417
7418         /* create L1 filter */
7419         filter_replace.old_filter_type =
7420                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7421         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7422         filter_replace.tr_bit = 0;
7423
7424         /* Prepare the buffer, 3 entries */
7425         filter_replace_buf.data[0] =
7426                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7427         filter_replace_buf.data[0] |=
7428                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7429         filter_replace_buf.data[2] = 0xFF;
7430         filter_replace_buf.data[3] = 0xFF;
7431         filter_replace_buf.data[4] =
7432                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7433         filter_replace_buf.data[4] |=
7434                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7435         filter_replace_buf.data[7] = 0xF0;
7436         filter_replace_buf.data[8] =
7437                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7438         filter_replace_buf.data[8] |=
7439                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7440         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7441                 I40E_TR_GENEVE_KEY_MASK |
7442                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7443         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7444                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7445                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7446
7447         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7448                                                &filter_replace_buf);
7449         if (!status) {
7450                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7451                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7452                             "cloud l1 type is changed from 0x%x to 0x%x",
7453                             filter_replace.old_filter_type,
7454                             filter_replace.new_filter_type);
7455         }
7456         return status;
7457 }
7458
7459 static enum i40e_status_code
7460 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7461 {
7462         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7463         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7464         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7465         enum i40e_status_code status = I40E_SUCCESS;
7466
7467         if (pf->support_multi_driver) {
7468                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7469                 return I40E_NOT_SUPPORTED;
7470         }
7471
7472         /* For MPLSoUDP */
7473         memset(&filter_replace, 0,
7474                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7475         memset(&filter_replace_buf, 0,
7476                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7477         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7478                 I40E_AQC_MIRROR_CLOUD_FILTER;
7479         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7480         filter_replace.new_filter_type =
7481                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7482         /* Prepare the buffer, 2 entries */
7483         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7484         filter_replace_buf.data[0] |=
7485                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7486         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7487         filter_replace_buf.data[4] |=
7488                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7489         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7490                                                &filter_replace_buf);
7491         if (status < 0)
7492                 return status;
7493         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7494                     "cloud filter type is changed from 0x%x to 0x%x",
7495                     filter_replace.old_filter_type,
7496                     filter_replace.new_filter_type);
7497
7498         /* For MPLSoGRE */
7499         memset(&filter_replace, 0,
7500                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7501         memset(&filter_replace_buf, 0,
7502                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7503
7504         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7505                 I40E_AQC_MIRROR_CLOUD_FILTER;
7506         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7507         filter_replace.new_filter_type =
7508                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7509         /* Prepare the buffer, 2 entries */
7510         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7511         filter_replace_buf.data[0] |=
7512                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7513         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7514         filter_replace_buf.data[4] |=
7515                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7516
7517         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7518                                                &filter_replace_buf);
7519         if (!status) {
7520                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7521                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7522                             "cloud filter type is changed from 0x%x to 0x%x",
7523                             filter_replace.old_filter_type,
7524                             filter_replace.new_filter_type);
7525         }
7526         return status;
7527 }
7528
7529 static enum i40e_status_code
7530 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7531 {
7532         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7533         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7534         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7535         enum i40e_status_code status = I40E_SUCCESS;
7536
7537         if (pf->support_multi_driver) {
7538                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7539                 return I40E_NOT_SUPPORTED;
7540         }
7541
7542         /* For GTP-C */
7543         memset(&filter_replace, 0,
7544                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7545         memset(&filter_replace_buf, 0,
7546                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7547         /* create L1 filter */
7548         filter_replace.old_filter_type =
7549                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7550         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
7551         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
7552                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7553         /* Prepare the buffer, 2 entries */
7554         filter_replace_buf.data[0] =
7555                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7556         filter_replace_buf.data[0] |=
7557                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7558         filter_replace_buf.data[2] = 0xFF;
7559         filter_replace_buf.data[3] = 0xFF;
7560         filter_replace_buf.data[4] =
7561                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7562         filter_replace_buf.data[4] |=
7563                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7564         filter_replace_buf.data[6] = 0xFF;
7565         filter_replace_buf.data[7] = 0xFF;
7566         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7567                                                &filter_replace_buf);
7568         if (status < 0)
7569                 return status;
7570         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7571                     "cloud l1 type is changed from 0x%x to 0x%x",
7572                     filter_replace.old_filter_type,
7573                     filter_replace.new_filter_type);
7574
7575         /* for GTP-U */
7576         memset(&filter_replace, 0,
7577                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7578         memset(&filter_replace_buf, 0,
7579                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7580         /* create L1 filter */
7581         filter_replace.old_filter_type =
7582                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
7583         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
7584         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
7585                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7586         /* Prepare the buffer, 2 entries */
7587         filter_replace_buf.data[0] =
7588                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7589         filter_replace_buf.data[0] |=
7590                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7591         filter_replace_buf.data[2] = 0xFF;
7592         filter_replace_buf.data[3] = 0xFF;
7593         filter_replace_buf.data[4] =
7594                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7595         filter_replace_buf.data[4] |=
7596                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7597         filter_replace_buf.data[6] = 0xFF;
7598         filter_replace_buf.data[7] = 0xFF;
7599
7600         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7601                                                &filter_replace_buf);
7602         if (!status) {
7603                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7604                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7605                             "cloud l1 type is changed from 0x%x to 0x%x",
7606                             filter_replace.old_filter_type,
7607                             filter_replace.new_filter_type);
7608         }
7609         return status;
7610 }
7611
7612 static enum i40e_status_code
7613 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
7614 {
7615         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7616         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7617         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7618         enum i40e_status_code status = I40E_SUCCESS;
7619
7620         if (pf->support_multi_driver) {
7621                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7622                 return I40E_NOT_SUPPORTED;
7623         }
7624
7625         /* for GTP-C */
7626         memset(&filter_replace, 0,
7627                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7628         memset(&filter_replace_buf, 0,
7629                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7630         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7631         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7632         filter_replace.new_filter_type =
7633                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7634         /* Prepare the buffer, 2 entries */
7635         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
7636         filter_replace_buf.data[0] |=
7637                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7638         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7639         filter_replace_buf.data[4] |=
7640                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7641         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7642                                                &filter_replace_buf);
7643         if (status < 0)
7644                 return status;
7645         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7646                     "cloud filter type is changed from 0x%x to 0x%x",
7647                     filter_replace.old_filter_type,
7648                     filter_replace.new_filter_type);
7649
7650         /* for GTP-U */
7651         memset(&filter_replace, 0,
7652                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7653         memset(&filter_replace_buf, 0,
7654                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7655         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7656         filter_replace.old_filter_type =
7657                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7658         filter_replace.new_filter_type =
7659                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7660         /* Prepare the buffer, 2 entries */
7661         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
7662         filter_replace_buf.data[0] |=
7663                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7664         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7665         filter_replace_buf.data[4] |=
7666                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7667
7668         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7669                                                &filter_replace_buf);
7670         if (!status) {
7671                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7672                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7673                             "cloud filter type is changed from 0x%x to 0x%x",
7674                             filter_replace.old_filter_type,
7675                             filter_replace.new_filter_type);
7676         }
7677         return status;
7678 }
7679
7680 int
7681 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
7682                       struct i40e_tunnel_filter_conf *tunnel_filter,
7683                       uint8_t add)
7684 {
7685         uint16_t ip_type;
7686         uint32_t ipv4_addr, ipv4_addr_le;
7687         uint8_t i, tun_type = 0;
7688         /* internal variable to convert ipv6 byte order */
7689         uint32_t convert_ipv6[4];
7690         int val, ret = 0;
7691         struct i40e_pf_vf *vf = NULL;
7692         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7693         struct i40e_vsi *vsi;
7694         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7695         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7696         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7697         struct i40e_tunnel_filter *tunnel, *node;
7698         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7699         uint32_t teid_le;
7700         bool big_buffer = 0;
7701
7702         cld_filter = rte_zmalloc("tunnel_filter",
7703                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7704                          0);
7705
7706         if (cld_filter == NULL) {
7707                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7708                 return -ENOMEM;
7709         }
7710         pfilter = cld_filter;
7711
7712         ether_addr_copy(&tunnel_filter->outer_mac,
7713                         (struct ether_addr *)&pfilter->element.outer_mac);
7714         ether_addr_copy(&tunnel_filter->inner_mac,
7715                         (struct ether_addr *)&pfilter->element.inner_mac);
7716
7717         pfilter->element.inner_vlan =
7718                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7719         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
7720                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7721                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7722                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7723                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7724                                 &ipv4_addr_le,
7725                                 sizeof(pfilter->element.ipaddr.v4.data));
7726         } else {
7727                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7728                 for (i = 0; i < 4; i++) {
7729                         convert_ipv6[i] =
7730                         rte_cpu_to_le_32(rte_be_to_cpu_32(
7731                                          tunnel_filter->ip_addr.ipv6_addr[i]));
7732                 }
7733                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7734                            &convert_ipv6,
7735                            sizeof(pfilter->element.ipaddr.v6.data));
7736         }
7737
7738         /* check the tunnel type */
7739         switch (tunnel_filter->tunnel_type) {
7740         case I40E_TUNNEL_TYPE_VXLAN:
7741                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7742                 break;
7743         case I40E_TUNNEL_TYPE_NVGRE:
7744                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7745                 break;
7746         case I40E_TUNNEL_TYPE_IP_IN_GRE:
7747                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7748                 break;
7749         case I40E_TUNNEL_TYPE_MPLSoUDP:
7750                 if (!pf->mpls_replace_flag) {
7751                         i40e_replace_mpls_l1_filter(pf);
7752                         i40e_replace_mpls_cloud_filter(pf);
7753                         pf->mpls_replace_flag = 1;
7754                 }
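                /*
                 * Illustrative reading of the packing below: the 20-bit
                 * MPLS label is split across two 16-bit flex words --
                 * WORD0 takes label bits 19:4 and WORD1 carries label
                 * bits 3:0 in its top nibble.
                 */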
7755                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7756                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7757                         teid_le >> 4;
7758                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7759                         (teid_le & 0xF) << 12;
7760                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7761                         0x40;
7762                 big_buffer = 1;
7763                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
7764                 break;
7765         case I40E_TUNNEL_TYPE_MPLSoGRE:
7766                 if (!pf->mpls_replace_flag) {
7767                         i40e_replace_mpls_l1_filter(pf);
7768                         i40e_replace_mpls_cloud_filter(pf);
7769                         pf->mpls_replace_flag = 1;
7770                 }
7771                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7772                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7773                         teid_le >> 4;
7774                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7775                         (teid_le & 0xF) << 12;
7776                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7777                         0x0;
7778                 big_buffer = 1;
7779                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
7780                 break;
7781         case I40E_TUNNEL_TYPE_GTPC:
7782                 if (!pf->gtp_replace_flag) {
7783                         i40e_replace_gtp_l1_filter(pf);
7784                         i40e_replace_gtp_cloud_filter(pf);
7785                         pf->gtp_replace_flag = 1;
7786                 }
7787                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7788                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
7789                         (teid_le >> 16) & 0xFFFF;
7790                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
7791                         teid_le & 0xFFFF;
7792                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
7793                         0x0;
7794                 big_buffer = 1;
7795                 break;
7796         case I40E_TUNNEL_TYPE_GTPU:
7797                 if (!pf->gtp_replace_flag) {
7798                         i40e_replace_gtp_l1_filter(pf);
7799                         i40e_replace_gtp_cloud_filter(pf);
7800                         pf->gtp_replace_flag = 1;
7801                 }
7802                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7803                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
7804                         (teid_le >> 16) & 0xFFFF;
7805                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
7806                         teid_le & 0xFFFF;
7807                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
7808                         0x0;
7809                 big_buffer = 1;
7810                 break;
7811         case I40E_TUNNEL_TYPE_QINQ:
7812                 if (!pf->qinq_replace_flag) {
7813                         ret = i40e_cloud_filter_qinq_create(pf);
7814                         if (ret < 0)
7815                                 PMD_DRV_LOG(DEBUG,
7816                                             "QinQ tunnel filter already created.");
7817                         pf->qinq_replace_flag = 1;
7818                 }
7819                 /* Put the outer and inner VLAN values into the
7820                  * general fields. The big buffer flag must be
7821                  * set; see the corresponding changes in
7822                  * i40e_aq_add_cloud_filters().
7823                  */
7824                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
7825                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
7826                 big_buffer = 1;
7827                 break;
7828         default:
7829                 /* Other tunnel types are not supported. */
7830                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7831                 rte_free(cld_filter);
7832                 return -EINVAL;
7833         }
7834
7835         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
7836                 pfilter->element.flags =
7837                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7838         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
7839                 pfilter->element.flags =
7840                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7841         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
7842                 pfilter->element.flags =
7843                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7844         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
7845                 pfilter->element.flags =
7846                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7847         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
7848                 pfilter->element.flags |=
7849                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
7850         else {
7851                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7852                                                 &pfilter->element.flags);
7853                 if (val < 0) {
7854                         rte_free(cld_filter);
7855                         return -EINVAL;
7856                 }
7857         }
7858
7859         pfilter->element.flags |= rte_cpu_to_le_16(
7860                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7861                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7862         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7863         pfilter->element.queue_number =
7864                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7865
7866         if (!tunnel_filter->is_to_vf)
7867                 vsi = pf->main_vsi;
7868         else {
7869                 if (tunnel_filter->vf_id >= pf->vf_num) {
7870                         PMD_DRV_LOG(ERR, "Invalid VF ID.");
7871                         rte_free(cld_filter);
7872                         return -EINVAL;
7873                 }
7874                 vf = &pf->vfs[tunnel_filter->vf_id];
7875                 vsi = vf->vsi;
7876         }
7877
7878         /* Check if the filter already exists in the SW list */
7879         memset(&check_filter, 0, sizeof(check_filter));
7880         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7881         check_filter.is_to_vf = tunnel_filter->is_to_vf;
7882         check_filter.vf_id = tunnel_filter->vf_id;
7883         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7884         if (add && node) {
7885                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7886                 rte_free(cld_filter);
7887                 return -EINVAL;
7888         }
7889
7890         if (!add && !node) {
7891                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7892                 rte_free(cld_filter);
7893                 return -EINVAL;
7894         }
7895
7896         if (add) {
7897                 if (big_buffer)
7898                         ret = i40e_aq_add_cloud_filters_big_buffer(hw,
7899                                                    vsi->seid, cld_filter, 1);
7900                 else
7901                         ret = i40e_aq_add_cloud_filters(hw,
7902                                         vsi->seid, &cld_filter->element, 1);
7903                 if (ret < 0) {
7904                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7905                         rte_free(cld_filter);
7906                         return -ENOTSUP;
7907                 }
7908                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7909                 if (tunnel == NULL) {
7910                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7911                         rte_free(cld_filter);
7912                         return -ENOMEM;
7913                 }
7914
7915                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7916                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7917                 if (ret < 0)
7918                         rte_free(tunnel);
7919         } else {
7920                 if (big_buffer)
7921                         ret = i40e_aq_remove_cloud_filters_big_buffer(
7922                                 hw, vsi->seid, cld_filter, 1);
7923                 else
7924                         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7925                                                    &cld_filter->element, 1);
7926                 if (ret < 0) {
7927                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7928                         rte_free(cld_filter);
7929                         return -ENOTSUP;
7930                 }
7931                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7932         }
7933
7934         rte_free(cld_filter);
7935         return ret;
7936 }
7937
7938 static int
7939 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
7940 {
7941         uint8_t i;
7942
7943         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7944                 if (pf->vxlan_ports[i] == port)
7945                         return i;
7946         }
7947
7948         return -1;
7949 }
7950
7951 static int
7952 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
7953 {
7954         int  idx, ret;
7955         uint8_t filter_idx;
7956         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7957
7958         idx = i40e_get_vxlan_port_idx(pf, port);
7959
7960         /* Check if port already exists */
7961         if (idx >= 0) {
7962                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
7963                 return -EINVAL;
7964         }
7965
7966         /* Now check for a free slot to add the new port (free slots hold port 0) */
7967         idx = i40e_get_vxlan_port_idx(pf, 0);
7968         if (idx < 0) {
7969                 PMD_DRV_LOG(ERR,
7970                         "Maximum number of UDP ports reached, not adding port %d",
7971                         port);
7972                 return -ENOSPC;
7973         }
7974
7975         ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
7976                                         &filter_idx, NULL);
7977         if (ret < 0) {
7978                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
7979                 return -1;
7980         }
7981
7982         PMD_DRV_LOG(INFO, "Added UDP port %d with AQ command, filter index %d",
7983                          port, filter_idx);
7984
7985         /* New port: add it and mark its index in the bitmap */
7986         pf->vxlan_ports[idx] = port;
7987         pf->vxlan_bitmap |= (1 << idx);
7988
7989         if (!(pf->flags & I40E_FLAG_VXLAN))
7990                 pf->flags |= I40E_FLAG_VXLAN;
7991
7992         return 0;
7993 }
7994
7995 static int
7996 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
7997 {
7998         int idx;
7999         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8000
8001         if (!(pf->flags & I40E_FLAG_VXLAN)) {
8002                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8003                 return -EINVAL;
8004         }
8005
8006         idx = i40e_get_vxlan_port_idx(pf, port);
8007
8008         if (idx < 0) {
8009                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8010                 return -EINVAL;
8011         }
8012
8013         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8014                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8015                 return -1;
8016         }
8017
8018         PMD_DRV_LOG(INFO, "Deleted UDP port %d with AQ command, filter index %d",
8019                         port, idx);
8020
8021         pf->vxlan_ports[idx] = 0;
8022         pf->vxlan_bitmap &= ~(1 << idx);
8023
8024         if (!pf->vxlan_bitmap)
8025                 pf->flags &= ~I40E_FLAG_VXLAN;
8026
8027         return 0;
8028 }
8029
8030 /* Add UDP tunneling port */
8031 static int
8032 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8033                              struct rte_eth_udp_tunnel *udp_tunnel)
8034 {
8035         int ret = 0;
8036         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8037
8038         if (udp_tunnel == NULL)
8039                 return -EINVAL;
8040
8041         switch (udp_tunnel->prot_type) {
8042         case RTE_TUNNEL_TYPE_VXLAN:
8043                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
8044                 break;
8045
8046         case RTE_TUNNEL_TYPE_GENEVE:
8047         case RTE_TUNNEL_TYPE_TEREDO:
8048                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8049                 ret = -1;
8050                 break;
8051
8052         default:
8053                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8054                 ret = -1;
8055                 break;
8056         }
8057
8058         return ret;
8059 }
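
/*
 * Application-side usage sketch (port_id is hypothetical; 4789 is the
 * IANA-assigned VXLAN port):
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */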
8060
8061 /* Remove UDP tunneling port */
8062 static int
8063 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8064                              struct rte_eth_udp_tunnel *udp_tunnel)
8065 {
8066         int ret = 0;
8067         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8068
8069         if (udp_tunnel == NULL)
8070                 return -EINVAL;
8071
8072         switch (udp_tunnel->prot_type) {
8073         case RTE_TUNNEL_TYPE_VXLAN:
8074                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8075                 break;
8076         case RTE_TUNNEL_TYPE_GENEVE:
8077         case RTE_TUNNEL_TYPE_TEREDO:
8078                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8079                 ret = -1;
8080                 break;
8081         default:
8082                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8083                 ret = -1;
8084                 break;
8085         }
8086
8087         return ret;
8088 }
8089
8090 /* Calculate the maximum number of contiguous PF queues that are configured */
8091 static int
8092 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8093 {
8094         struct rte_eth_dev_data *data = pf->dev_data;
8095         int i, num;
8096         struct i40e_rx_queue *rxq;
8097
8098         num = 0;
8099         for (i = 0; i < pf->lan_nb_qps; i++) {
8100                 rxq = data->rx_queues[i];
8101                 if (rxq && rxq->q_set)
8102                         num++;
8103                 else
8104                         break;
8105         }
8106
8107         return num;
8108 }
8109
8110 /* Configure RSS */
8111 static int
8112 i40e_pf_config_rss(struct i40e_pf *pf)
8113 {
8114         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8115         struct rte_eth_rss_conf rss_conf;
8116         uint32_t i, lut = 0;
8117         uint16_t j, num;
8118
8119         /*
8120          * If both VMDQ and RSS are enabled, not all PF queues are configured.
8121          * It's necessary to calculate how many PF queues are actually configured.
8122          */
8123         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8124                 num = i40e_pf_calc_configured_queues_num(pf);
8125         else
8126                 num = pf->dev_data->nb_rx_queues;
8127
8128         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8129         PMD_INIT_LOG(INFO, "At most %u contiguous PF queues are configured",
8130                         num);
8131
8132         if (num == 0) {
8133                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
8134                 return -ENOTSUP;
8135         }
8136
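        /* Spread the queues round-robin across the RSS lookup table.
         * Each 32-bit HLUT register holds four 8-bit entries, so the
         * entries are accumulated in 'lut' and written to hardware on
         * every fourth iteration, i.e. when (i & 3) == 3.
         */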
8137         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
8138                 if (j == num)
8139                         j = 0;
8140                 lut = (lut << 8) | (j & ((0x1 <<
8141                         hw->func_caps.rss_table_entry_width) - 1));
8142                 if ((i & 3) == 3)
8143                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
8144         }
8145
8146         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
8147         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
8148                 i40e_pf_disable_rss(pf);
8149                 return 0;
8150         }
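        /* The hardware hash key is (I40E_PFQF_HKEY_MAX_INDEX + 1)
         * 32-bit registers, i.e. 52 bytes; fall back to the built-in
         * default key when the caller provides none or a shorter one.
         */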
8151         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
8152                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
8153                 /* Random default keys */
8154                 static uint32_t rss_key_default[] = {0x6b793944,
8155                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8156                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8157                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8158
8159                 rss_conf.rss_key = (uint8_t *)rss_key_default;
8160                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8161                                                         sizeof(uint32_t);
8162         }
8163
8164         return i40e_hw_rss_hash_set(pf, &rss_conf);
8165 }
8166
8167 static int
8168 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
8169                                struct rte_eth_tunnel_filter_conf *filter)
8170 {
8171         if (pf == NULL || filter == NULL) {
8172                 PMD_DRV_LOG(ERR, "Invalid parameter");
8173                 return -EINVAL;
8174         }
8175
8176         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
8177                 PMD_DRV_LOG(ERR, "Invalid queue ID");
8178                 return -EINVAL;
8179         }
8180
8181         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
8182                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
8183                 return -EINVAL;
8184         }
8185
8186         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
8187                 (is_zero_ether_addr(&filter->outer_mac))) {
8188                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
8189                 return -EINVAL;
8190         }
8191
8192         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
8193                 (is_zero_ether_addr(&filter->inner_mac))) {
8194                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
8195                 return -EINVAL;
8196         }
8197
8198         return 0;
8199 }
8200
8201 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8202 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
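/*
 * GRE key length selection: i40e_dev_set_gre_key_len() sets the
 * MSK_ENA bit in GL_PRS_FVBM(2) for a 3-byte key and clears it for
 * the default 4-byte key; any other length is rejected.
 */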
8203 static int
8204 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8205 {
8206         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8207         uint32_t val, reg;
8208         int ret = -EINVAL;
8209
8210         if (pf->support_multi_driver) {
8211                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8212                 return -ENOTSUP;
8213         }
8214
8215         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8216         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8217
8218         if (len == 3) {
8219                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8220         } else if (len == 4) {
8221                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8222         } else {
8223                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8224                 return ret;
8225         }
8226
8227         if (reg != val) {
8228                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
8229                                                    reg, NULL);
8230                 if (ret != 0)
8231                         return ret;
8232                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
8233                             "with value 0x%08x",
8234                             I40E_GL_PRS_FVBM(2), reg);
8235                 i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
8236         } else {
8237                 ret = 0;
8238         }
8239         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8240                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8241
8242         return ret;
8243 }
8244
8245 static int
8246 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
8247 {
8248         int ret = -EINVAL;
8249
8250         if (!hw || !cfg)
8251                 return -EINVAL;
8252
8253         switch (cfg->cfg_type) {
8254         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
8255                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
8256                 break;
8257         default:
8258                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
8259                 break;
8260         }
8261
8262         return ret;
8263 }
8264
8265 static int
8266 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
8267                                enum rte_filter_op filter_op,
8268                                void *arg)
8269 {
8270         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8271         int ret = I40E_ERR_PARAM;
8272
8273         switch (filter_op) {
8274         case RTE_ETH_FILTER_SET:
8275                 ret = i40e_dev_global_config_set(hw,
8276                         (struct rte_eth_global_cfg *)arg);
8277                 break;
8278         default:
8279                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8280                 break;
8281         }
8282
8283         return ret;
8284 }
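
/*
 * Usage sketch, assuming this handler is reached through the generic
 * filter API with filter type RTE_ETH_FILTER_NONE (port_id is
 * hypothetical):
 *
 *	struct rte_eth_global_cfg cfg = {
 *		.cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN,
 *		.cfg.gre_key_len = 3,
 *	};
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NONE,
 *				RTE_ETH_FILTER_SET, &cfg);
 */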
8285
8286 static int
8287 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
8288                           enum rte_filter_op filter_op,
8289                           void *arg)
8290 {
8291         struct rte_eth_tunnel_filter_conf *filter;
8292         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8293         int ret = I40E_SUCCESS;
8294
8295         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
8296
8297         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
8298                 return I40E_ERR_PARAM;
8299
8300         switch (filter_op) {
8301         case RTE_ETH_FILTER_NOP:
8302                 if (!(pf->flags & I40E_FLAG_VXLAN))
8303                         ret = I40E_NOT_SUPPORTED;
8304                 break;
8305         case RTE_ETH_FILTER_ADD:
8306                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
8307                 break;
8308         case RTE_ETH_FILTER_DELETE:
8309                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
8310                 break;
8311         default:
8312                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8313                 ret = I40E_ERR_PARAM;
8314                 break;
8315         }
8316
8317         return ret;
8318 }
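
/*
 * Usage sketch, assuming dispatch via the generic filter API (values
 * are illustrative; an IMAC filter also needs a non-zero inner_mac to
 * pass i40e_tunnel_filter_param_check()):
 *
 *	struct rte_eth_tunnel_filter_conf conf = {
 *		.tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
 *		.filter_type = RTE_TUNNEL_FILTER_IMAC_TENID,
 *		.tenant_id = 100,
 *		.queue_id = 1,
 *	};
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
 *				RTE_ETH_FILTER_ADD, &conf);
 */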
8319
8320 static int
8321 i40e_pf_config_mq_rx(struct i40e_pf *pf)
8322 {
8323         int ret = 0;
8324         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8325
8326         /* RSS setup */
8327         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
8328                 ret = i40e_pf_config_rss(pf);
8329         else
8330                 i40e_pf_disable_rss(pf);
8331
8332         return ret;
8333 }
8334
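/*
 * With symmetric hashing enabled, a flow and its reverse (source and
 * destination fields swapped) hash to the same value, so both
 * directions of a connection land on the same RX queue.
 */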
8335 /* Get the symmetric hash enable configurations per port */
8336 static void
8337 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
8338 {
8339         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8340
8341         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
8342 }
8343
8344 /* Set the symmetric hash enable configurations per port */
8345 static void
8346 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8347 {
8348         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8349
8350         if (enable > 0) {
8351                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
8352                         PMD_DRV_LOG(INFO,
8353                                 "Symmetric hash has already been enabled");
8354                         return;
8355                 }
8356                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8357         } else {
8358                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
8359                         PMD_DRV_LOG(INFO,
8360                                 "Symmetric hash has already been disabled");
8361                         return;
8362                 }
8363                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8364         }
8365         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8366         I40E_WRITE_FLUSH(hw);
8367 }
8368
8369 /*
8370  * Get global configurations of hash function type and symmetric hash enable
8371  * per flow type (pctype). Note that global configuration means it affects all
8372  * the ports on the same NIC.
8373  */
8374 static int
8375 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
8376                                    struct rte_eth_hash_global_conf *g_cfg)
8377 {
8378         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8379         uint32_t reg;
8380         uint16_t i, j;
8381
8382         memset(g_cfg, 0, sizeof(*g_cfg));
8383         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8384         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
8385                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
8386         else
8387                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
8388         PMD_DRV_LOG(DEBUG, "Hash function is %s",
8389                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
8390
8391         /*
8392          * As i40e supports fewer than 64 flow types, only the first
8393          * 64 bits need to be checked.
8394          */
8395         for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8396                 g_cfg->valid_bit_mask[i] = 0ULL;
8397                 g_cfg->sym_hash_enable_mask[i] = 0ULL;
8398         }
8399
8400         g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
8401
8402         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
8403                 if (!adapter->pctypes_tbl[i])
8404                         continue;
8405                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8406                      j < I40E_FILTER_PCTYPE_MAX; j++) {
8407                         if (adapter->pctypes_tbl[i] & (1ULL << j)) {
8408                                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
8409                                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8410                                         g_cfg->sym_hash_enable_mask[0] |=
8411                                                                 (1ULL << i);
8412                                 }
8413                         }
8414                 }
8415         }
8416
8417         return 0;
8418 }
8419
8420 static int
8421 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
8422                               const struct rte_eth_hash_global_conf *g_cfg)
8423 {
8424         uint32_t i;
8425         uint64_t mask0, i40e_mask = adapter->flow_types_mask;
8426
8427         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
8428                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
8429                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
8430                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
8431                                                 g_cfg->hash_func);
8432                 return -EINVAL;
8433         }
8434
8435         /*
8436          * As i40e supports fewer than 64 flow types, only the first
8437          * 64 bits need to be checked.
8438          */
8439         mask0 = g_cfg->valid_bit_mask[0];
8440         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8441                 if (i == 0) {
8442                         /* Check if any unsupported flow type is configured */
8443                         if ((mask0 | i40e_mask) ^ i40e_mask)
8444                                 goto mask_err;
8445                 } else {
8446                         if (g_cfg->valid_bit_mask[i])
8447                                 goto mask_err;
8448                 }
8449         }
8450
8451         return 0;
8452
8453 mask_err:
8454         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
8455
8456         return -EINVAL;
8457 }
8458
8459 /*
8460  * Set global configurations of hash function type and symmetric hash enable
8461  * per flow type (pctype). Note any modifying global configuration will affect
8462  * per flow type (pctype). Note that modifying the global configuration affects
8463  */
8464 static int
8465 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
8466                                    struct rte_eth_hash_global_conf *g_cfg)
8467 {
8468         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8469         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8470         int ret;
8471         uint16_t i, j;
8472         uint32_t reg;
8473         uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
8474
8475         if (pf->support_multi_driver) {
8476                 PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
8477                 return -ENOTSUP;
8478         }
8479
8480         /* Check the input parameters */
8481         ret = i40e_hash_global_config_check(adapter, g_cfg);
8482         if (ret < 0)
8483                 return ret;
8484
8485         /*
8486          * As i40e supports fewer than 64 flow types, only the first
8487          * 64 bits need to be configured.
8488          */
8489         for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
8490                 if (mask0 & (1UL << i)) {
8491                         reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
8492                                         I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
8493
8494                         for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8495                              j < I40E_FILTER_PCTYPE_MAX; j++) {
8496                                 if (adapter->pctypes_tbl[i] & (1ULL << j))
8497                                         i40e_write_global_rx_ctl(hw,
8498                                                           I40E_GLQF_HSYM(j),
8499                                                           reg);
8500                         }
8501                         i40e_global_cfg_warning(I40E_WARNING_HSYM);
8502                 }
8503         }
8504
8505         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8506         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
8507                 /* Toeplitz */
8508                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
8509                         PMD_DRV_LOG(DEBUG,
8510                                 "Hash function already set to Toeplitz");
8511                         goto out;
8512                 }
8513                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
8514         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
8515                 /* Simple XOR */
8516                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
8517                         PMD_DRV_LOG(DEBUG,
8518                                 "Hash function already set to Simple XOR");
8519                         goto out;
8520                 }
8521                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
8522         } else
8523                 /* Use the default, and keep it as it is */
8524                 goto out;
8525
8526         i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
8527         i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
8528
8529 out:
8530         I40E_WRITE_FLUSH(hw);
8531
8532         return 0;
8533 }
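
/*
 * Usage sketch, assuming dispatch via the generic filter API with
 * filter type RTE_ETH_FILTER_HASH (port_id is hypothetical):
 *
 *	struct rte_eth_hash_filter_info info = { 0 };
 *
 *	info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
 *	info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
 *	info.info.global_conf.valid_bit_mask[0] =
 *		1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 *	info.info.global_conf.sym_hash_enable_mask[0] =
 *		1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *				RTE_ETH_FILTER_SET, &info);
 */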
8534
8535 /**
8536  * Valid input sets for hash and flow director filters per PCTYPE
8537  */
8538 static uint64_t
8539 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
8540                 enum rte_filter_type filter)
8541 {
8542         uint64_t valid;
8543
8544         static const uint64_t valid_hash_inset_table[] = {
8545                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8546                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8547                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8548                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
8549                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
8550                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8551                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8552                         I40E_INSET_FLEX_PAYLOAD,
8553                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8554                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8555                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8556                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8557                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8558                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8559                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8560                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8561                         I40E_INSET_FLEX_PAYLOAD,
8562                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8563                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8564                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8565                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8566                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8567                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8568                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8569                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8570                         I40E_INSET_FLEX_PAYLOAD,
8571                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8572                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8573                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8574                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8575                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8576                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8577                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8578                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8579                         I40E_INSET_FLEX_PAYLOAD,
8580                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8581                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8582                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8583                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8584                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8585                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8586                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8587                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8588                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8589                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8590                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8591                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8592                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8593                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8594                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8595                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8596                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8597                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8598                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8599                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8600                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8601                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8602                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8603                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8604                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8605                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8606                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
8607                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8608                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8609                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8610                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8611                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8612                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8613                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8614                         I40E_INSET_FLEX_PAYLOAD,
8615                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8616                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8617                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8618                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8619                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8620                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
8621                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
8622                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
8623                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8624                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8625                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8626                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8627                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8628                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8629                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8630                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
8631                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8632                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8633                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8634                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8635                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8636                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8637                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8638                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8639                         I40E_INSET_FLEX_PAYLOAD,
8640                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8641                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8642                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8643                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8644                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8645                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8646                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8647                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8648                         I40E_INSET_FLEX_PAYLOAD,
8649                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8650                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8651                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8652                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8653                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8654                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8655                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8656                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8657                         I40E_INSET_FLEX_PAYLOAD,
8658                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8659                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8660                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8661                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8662                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8663                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8664                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8665                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8666                         I40E_INSET_FLEX_PAYLOAD,
8667                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8668                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8669                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8670                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8671                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8672                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8673                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8674                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
8675                         I40E_INSET_FLEX_PAYLOAD,
8676                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8677                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8678                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8679                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8680                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8681                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8682                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
8683                         I40E_INSET_FLEX_PAYLOAD,
8684                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8685                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8686                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8687                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
8688                         I40E_INSET_FLEX_PAYLOAD,
8689         };
8690
8691         /**
8692          * Flow director supports only fields defined in
8693          * union rte_eth_fdir_flow.
8694          */
8695         static const uint64_t valid_fdir_inset_table[] = {
8696                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8697                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8698                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8699                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8700                 I40E_INSET_IPV4_TTL,
8701                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8702                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8703                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8704                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8705                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8706                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8707                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8708                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8709                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8710                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8711                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8712                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8713                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8714                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8715                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8716                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8717                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8718                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8719                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8720                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8721                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8722                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8723                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8724                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8725                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8726                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8727                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8728                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8729                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8730                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8731                 I40E_INSET_SCTP_VT,
8732                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8733                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8734                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8735                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8736                 I40E_INSET_IPV4_TTL,
8737                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8738                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8739                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8740                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8741                 I40E_INSET_IPV6_HOP_LIMIT,
8742                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8743                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8744                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8745                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8746                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8747                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8748                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8749                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8750                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8751                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8752                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8753                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8754                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8755                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8756                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8757                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8758                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8759                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8760                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8761                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8762                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8763                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8764                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8765                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8766                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8767                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8768                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8769                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8770                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8771                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8772                 I40E_INSET_SCTP_VT,
8773                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8774                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8775                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8776                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8777                 I40E_INSET_IPV6_HOP_LIMIT,
8778                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8779                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8780                 I40E_INSET_LAST_ETHER_TYPE,
8781         };
8782
8783         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8784                 return 0;
8785         if (filter == RTE_ETH_FILTER_HASH)
8786                 valid = valid_hash_inset_table[pctype];
8787         else
8788                 valid = valid_fdir_inset_table[pctype];
8789
8790         return valid;
8791 }
8792
8793 /**
8794  * Validate if the input set is allowed for a specific PCTYPE
8795  */
8796 int
8797 i40e_validate_input_set(enum i40e_filter_pctype pctype,
8798                 enum rte_filter_type filter, uint64_t inset)
8799 {
8800         uint64_t valid;
8801
8802         valid = i40e_get_valid_input_set(pctype, filter);
8803         if (inset & (~valid))
8804                 return -EINVAL;
8805
8806         return 0;
8807 }
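
/*
 * For example, with the flow director table above,
 * i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
 * RTE_ETH_FILTER_FDIR, I40E_INSET_IPV4_SRC | I40E_INSET_DST_PORT)
 * returns 0, while adding I40E_INSET_TUNNEL_ID to the same input set
 * returns -EINVAL, as that field is not valid for this pctype.
 */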
8808
8809 /* Default input set field combination per pctype */
8810 uint64_t
8811 i40e_get_default_input_set(uint16_t pctype)
8812 {
8813         static const uint64_t default_inset_table[] = {
8814                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8815                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8816                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8817                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8818                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8819                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8820                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8821                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8822                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8823                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8824                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8825                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8826                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8827                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8828                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8829                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8830                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8831                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8832                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8833                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8834                         I40E_INSET_SCTP_VT,
8835                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8836                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8837                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8838                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8839                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8840                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8841                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8842                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8843                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8844                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8845                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8846                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8847                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8848                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8849                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8850                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8851                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8852                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8853                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8854                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8855                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8856                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8857                         I40E_INSET_SCTP_VT,
8858                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8859                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8860                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8861                         I40E_INSET_LAST_ETHER_TYPE,
8862         };
8863
8864         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8865                 return 0;
8866
8867         return default_inset_table[pctype];
8868 }
8869
8870 /**
8871  * Parse the input set from field indexes to logical bit masks
8872  */
8873 static int
8874 i40e_parse_input_set(uint64_t *inset,
8875                      enum i40e_filter_pctype pctype,
8876                      enum rte_eth_input_set_field *field,
8877                      uint16_t size)
8878 {
8879         uint16_t i, j;
8880         int ret = -EINVAL;
8881
8882         static const struct {
8883                 enum rte_eth_input_set_field field;
8884                 uint64_t inset;
8885         } inset_convert_table[] = {
8886                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
8887                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
8888                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
8889                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
8890                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
8891                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
8892                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
8893                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
8894                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
8895                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
8896                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
8897                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
8898                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
8899                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
8900                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
8901                         I40E_INSET_IPV6_NEXT_HDR},
8902                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
8903                         I40E_INSET_IPV6_HOP_LIMIT},
8904                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
8905                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
8906                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
8907                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
8908                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
8909                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
8910                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
8911                         I40E_INSET_SCTP_VT},
8912                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
8913                         I40E_INSET_TUNNEL_DMAC},
8914                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
8915                         I40E_INSET_VLAN_TUNNEL},
8916                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
8917                         I40E_INSET_TUNNEL_ID},
8918                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
8919                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
8920                         I40E_INSET_FLEX_PAYLOAD_W1},
8921                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
8922                         I40E_INSET_FLEX_PAYLOAD_W2},
8923                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
8924                         I40E_INSET_FLEX_PAYLOAD_W3},
8925                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
8926                         I40E_INSET_FLEX_PAYLOAD_W4},
8927                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
8928                         I40E_INSET_FLEX_PAYLOAD_W5},
8929                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
8930                         I40E_INSET_FLEX_PAYLOAD_W6},
8931                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
8932                         I40E_INSET_FLEX_PAYLOAD_W7},
8933                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
8934                         I40E_INSET_FLEX_PAYLOAD_W8},
8935         };
8936
8937         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
8938                 return ret;
8939
8940         /* Only one item is allowed for default or none */
8941         if (size == 1) {
8942                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
8943                         *inset = i40e_get_default_input_set(pctype);
8944                         return 0;
8945                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
8946                         *inset = I40E_INSET_NONE;
8947                         return 0;
8948                 }
8949         }
8950
8951         for (i = 0, *inset = 0; i < size; i++) {
8952                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
8953                         if (field[i] == inset_convert_table[j].field) {
8954                                 *inset |= inset_convert_table[j].inset;
8955                                 break;
8956                         }
8957                 }
8958
8959                 /* It contains an unsupported input set field; return immediately */
8960                 if (j == RTE_DIM(inset_convert_table))
8961                         return ret;
8962         }
8963
8964         return 0;
8965 }
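
/*
 * For example, a caller passing
 *	field[] = { RTE_ETH_INPUT_SET_L3_SRC_IP4,
 *		    RTE_ETH_INPUT_SET_L4_UDP_DST_PORT }
 * with size == 2 gets back
 *	*inset == (I40E_INSET_IPV4_SRC | I40E_INSET_DST_PORT).
 */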
8966
8967 /**
8968  * Translate the input set from logical bit masks to register-aware
8969  * bit masks
8970  */
8971 uint64_t
8972 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
8973 {
8974         uint64_t val = 0;
8975         uint16_t i;
8976
8977         struct inset_map {
8978                 uint64_t inset;
8979                 uint64_t inset_reg;
8980         };
8981
8982         static const struct inset_map inset_map_common[] = {
8983                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
8984                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
8985                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
8986                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
8987                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
8988                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
8989                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
8990                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
8991                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
8992                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
8993                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
8994                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
8995                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
8996                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
8997                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
8998                 {I40E_INSET_TUNNEL_DMAC,
8999                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9000                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9001                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9002                 {I40E_INSET_TUNNEL_SRC_PORT,
9003                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9004                 {I40E_INSET_TUNNEL_DST_PORT,
9005                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9006                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9007                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9008                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9009                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9010                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9011                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9012                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9013                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9014                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9015         };
9016
9017         /* Some registers are mapped differently on the X722 */
9018         static const struct inset_map inset_map_diff_x722[] = {
9019                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9020                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9021                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9022                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9023         };
9024
9025         static const struct inset_map inset_map_diff_not_x722[] = {
9026                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9027                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9028                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9029                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9030         };
9031
9032         if (input == 0)
9033                 return val;
9034
9035         /* Translate input set to register aware inset */
9036         if (type == I40E_MAC_X722) {
9037                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9038                         if (input & inset_map_diff_x722[i].inset)
9039                                 val |= inset_map_diff_x722[i].inset_reg;
9040                 }
9041         } else {
9042                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9043                         if (input & inset_map_diff_not_x722[i].inset)
9044                                 val |= inset_map_diff_not_x722[i].inset_reg;
9045                 }
9046         }
9047
9048         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9049                 if (input & inset_map_common[i].inset)
9050                         val |= inset_map_common[i].inset_reg;
9051         }
9052
9053         return val;
9054 }
9055
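/*
 * Illustrative sketch (not part of the driver; the i40e_example_* helpers
 * here and below are hypothetical): translate an abstract input-set bitmap
 * into its register-level encoding for the current MAC type.
 */
static inline uint64_t
i40e_example_translate_inset(struct i40e_hw *hw)
{
        /* Match on the IPv4 source address plus the L4 destination port */
        uint64_t input_set = I40E_INSET_IPV4_SRC | I40E_INSET_DST_PORT;

        /* X722 uses different encodings for some L3 IPv4 fields; the
         * translation selects the right mapping table internally.
         */
        return i40e_translate_input_set_reg(hw->mac.type, input_set);
}
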
9056 int
9057 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9058 {
9059         uint8_t i, idx = 0;
9060         uint64_t inset_need_mask = inset;
9061
9062         static const struct {
9063                 uint64_t inset;
9064                 uint32_t mask;
9065         } inset_mask_map[] = {
9066                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
9067                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
9068                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
9069                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
9070                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
9071                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
9072                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
9073                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
9074         };
9075
9076         if (!inset || !mask || !nb_elem)
9077                 return 0;
9078
9079         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9080                 /* Clear the inset bit if no mask register is required,
9081                  * for example when both proto and ttl are present
9082                  */
9083                 if ((inset & inset_mask_map[i].inset) ==
9084                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
9085                         inset_need_mask &= ~inset_mask_map[i].inset;
9086                 if (!inset_need_mask)
9087                         return 0;
9088         }
9089         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9090                 if ((inset_need_mask & inset_mask_map[i].inset) ==
9091                     inset_mask_map[i].inset) {
9092                         if (idx >= nb_elem) {
9093                                 PMD_DRV_LOG(ERR, "Exceeded the maximal number of bitmasks");
9094                                 return -EINVAL;
9095                         }
9096                         mask[idx] = inset_mask_map[i].mask;
9097                         idx++;
9098                 }
9099         }
9100
9101         return idx;
9102 }
9103
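/*
 * Minimal sketch (hypothetical helper): derive the per-pctype field-mask
 * registers for an input set that includes IPv4 TOS. A negative return
 * value means more than I40E_INSET_MASK_NUM_REG masks would be needed.
 */
static inline int
i40e_example_build_masks(void)
{
        uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
        uint64_t input_set = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_TOS;

        /* SRC IP is matched in full, so only the TOS field consumes
         * one of the mask register slots here.
         */
        return i40e_generate_inset_mask_reg(input_set, mask_reg,
                                            I40E_INSET_MASK_NUM_REG);
}
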
9104 void
9105 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9106 {
9107         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9108
9109         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9110         if (reg != val)
9111                 i40e_write_rx_ctl(hw, addr, val);
9112         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9113                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9114 }
9115
9116 void
9117 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9118 {
9119         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9120
9121         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9122         if (reg != val)
9123                 i40e_write_global_rx_ctl(hw, addr, val);
9124         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9125                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9126 }
9127
9128 static void
9129 i40e_filter_input_set_init(struct i40e_pf *pf)
9130 {
9131         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9132         enum i40e_filter_pctype pctype;
9133         uint64_t input_set, inset_reg;
9134         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9135         int num, i;
9136         uint16_t flow_type;
9137
9138         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9139              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9140                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9141
9142                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9143                         continue;
9144
9145                 input_set = i40e_get_default_input_set(pctype);
9146
9147                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9148                                                    I40E_INSET_MASK_NUM_REG);
9149                 if (num < 0)
9150                         return;
9151                 if (pf->support_multi_driver && num > 0) {
9152                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9153                         return;
9154                 }
9155                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9156                                         input_set);
9157
9158                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9159                                       (uint32_t)(inset_reg & UINT32_MAX));
9160                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9161                                      (uint32_t)((inset_reg >>
9162                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
9163                 if (!pf->support_multi_driver) {
9164                         i40e_check_write_global_reg(hw,
9165                                             I40E_GLQF_HASH_INSET(0, pctype),
9166                                             (uint32_t)(inset_reg & UINT32_MAX));
9167                         i40e_check_write_global_reg(hw,
9168                                              I40E_GLQF_HASH_INSET(1, pctype),
9169                                              (uint32_t)((inset_reg >>
9170                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
9171
9172                         for (i = 0; i < num; i++) {
9173                                 i40e_check_write_global_reg(hw,
9174                                                     I40E_GLQF_FD_MSK(i, pctype),
9175                                                     mask_reg[i]);
9176                                 i40e_check_write_global_reg(hw,
9177                                                   I40E_GLQF_HASH_MSK(i, pctype),
9178                                                   mask_reg[i]);
9179                         }
9180                         /* Clear unused mask registers of the pctype */
9181                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9182                                 i40e_check_write_global_reg(hw,
9183                                                     I40E_GLQF_FD_MSK(i, pctype),
9184                                                     0);
9185                                 i40e_check_write_global_reg(hw,
9186                                                   I40E_GLQF_HASH_MSK(i, pctype),
9187                                                   0);
9188                         }
9189                 } else {
9190                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9191                 }
9192                 I40E_WRITE_FLUSH(hw);
9193
9194                 /* store the default input set */
9195                 if (!pf->support_multi_driver)
9196                         pf->hash_input_set[pctype] = input_set;
9197                 pf->fdir.input_set[pctype] = input_set;
9198         }
9199
9200         if (!pf->support_multi_driver) {
9201                 i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9202                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9203                 i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9204         }
9205 }
9206
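/*
 * The 64-bit inset value programmed above is split across two 32-bit
 * registers; a minimal sketch of that split (illustrative only):
 */
static inline void
i40e_example_split_inset_reg(uint64_t inset_reg,
                             uint32_t *low, uint32_t *high)
{
        *low = (uint32_t)(inset_reg & UINT32_MAX);      /* INSET(pctype, 0) */
        *high = (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) &
                           UINT32_MAX);                 /* INSET(pctype, 1) */
}
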
9207 int
9208 i40e_hash_filter_inset_select(struct i40e_hw *hw,
9209                          struct rte_eth_input_set_conf *conf)
9210 {
9211         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9212         enum i40e_filter_pctype pctype;
9213         uint64_t input_set, inset_reg = 0;
9214         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9215         int ret, i, num;
9216
9217         if (!conf) {
9218                 PMD_DRV_LOG(ERR, "Invalid pointer");
9219                 return -EFAULT;
9220         }
9221         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9222             conf->op != RTE_ETH_INPUT_SET_ADD) {
9223                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9224                 return -EINVAL;
9225         }
9226
9227         if (pf->support_multi_driver) {
9228                 PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
9229                 return -ENOTSUP;
9230         }
9231
9232         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9233         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9234                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9235                 return -EINVAL;
9236         }
9237
9238         if (hw->mac.type == I40E_MAC_X722) {
9239                 /* get translated pctype value in fd pctype register */
9240                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
9241                         I40E_GLQF_FD_PCTYPES((int)pctype));
9242         }
9243
9244         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9245                                    conf->inset_size);
9246         if (ret) {
9247                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9248                 return -EINVAL;
9249         }
9250
9251         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
9252                 /* get inset value in register */
9253                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9254                 inset_reg <<= I40E_32_BIT_WIDTH;
9255                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9256                 input_set |= pf->hash_input_set[pctype];
9257         }
9258         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9259                                            I40E_INSET_MASK_NUM_REG);
9260         if (num < 0)
9261                 return -EINVAL;
9262
9263         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9264
9265         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9266                                     (uint32_t)(inset_reg & UINT32_MAX));
9267         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9268                                     (uint32_t)((inset_reg >>
9269                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
9270         i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9271
9272         for (i = 0; i < num; i++)
9273                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9274                                             mask_reg[i]);
9275         /* Clear unused mask registers of the pctype */
9276         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9277                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9278                                             0);
9279         i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9280         I40E_WRITE_FLUSH(hw);
9281
9282         pf->hash_input_set[pctype] = input_set;
9283         return 0;
9284 }
9285
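/*
 * Application-level sketch (hypothetical, assumes a configured port):
 * hash IPv4/UDP flows on source IP and UDP source port only. The request
 * reaches i40e_hash_filter_inset_select() above through the
 * RTE_ETH_FILTER_HASH path dispatched below.
 */
static inline int
i40e_example_select_hash_inset(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
        info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        info.info.input_set_conf.inset_size = 2;
        info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
        info.info.input_set_conf.field[1] = RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT;
        info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}
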
9286 int
9287 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
9288                          struct rte_eth_input_set_conf *conf)
9289 {
9290         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9291         enum i40e_filter_pctype pctype;
9292         uint64_t input_set, inset_reg = 0;
9293         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9294         int ret, i, num;
9295
9296         if (!hw || !conf) {
9297                 PMD_DRV_LOG(ERR, "Invalid pointer");
9298                 return -EFAULT;
9299         }
9300         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9301             conf->op != RTE_ETH_INPUT_SET_ADD) {
9302                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9303                 return -EINVAL;
9304         }
9305
9306         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9307
9308         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9309                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9310                 return -EINVAL;
9311         }
9312
9313         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9314                                    conf->inset_size);
9315         if (ret) {
9316                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9317                 return -EINVAL;
9318         }
9319
9320         /* get inset value in register */
9321         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
9322         inset_reg <<= I40E_32_BIT_WIDTH;
9323         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
9324
9325         /* Cannot change the inset register for flex payload for FDIR;
9326          * it is done by writing I40E_PRTQF_FD_FLXINSET
9327          * in i40e_set_flex_mask_on_pctype.
9328          */
9329         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
9330                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
9331         else
9332                 input_set |= pf->fdir.input_set[pctype];
9333         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9334                                            I40E_INSET_MASK_NUM_REG);
9335         if (num < 0)
9336                 return -EINVAL;
9337         if (pf->support_multi_driver && num > 0) {
9338                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9339                 return -ENOTSUP;
9340         }
9341
9342         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9343
9344         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9345                               (uint32_t)(inset_reg & UINT32_MAX));
9346         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9347                              (uint32_t)((inset_reg >>
9348                              I40E_32_BIT_WIDTH) & UINT32_MAX));
9349
9350         if (!pf->support_multi_driver) {
9351                 for (i = 0; i < num; i++)
9352                         i40e_check_write_global_reg(hw,
9353                                                     I40E_GLQF_FD_MSK(i, pctype),
9354                                                     mask_reg[i]);
9355                 /* Clear unused mask registers of the pctype */
9356                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9357                         i40e_check_write_global_reg(hw,
9358                                                     I40E_GLQF_FD_MSK(i, pctype),
9359                                                     0);
9360                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9361         } else {
9362                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9363         }
9364         I40E_WRITE_FLUSH(hw);
9365
9366         pf->fdir.input_set[pctype] = input_set;
9367         return 0;
9368 }
9369
9370 static int
9371 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9372 {
9373         int ret = 0;
9374
9375         if (!hw || !info) {
9376                 PMD_DRV_LOG(ERR, "Invalid pointer");
9377                 return -EFAULT;
9378         }
9379
9380         switch (info->info_type) {
9381         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9382                 i40e_get_symmetric_hash_enable_per_port(hw,
9383                                         &(info->info.enable));
9384                 break;
9385         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9386                 ret = i40e_get_hash_filter_global_config(hw,
9387                                 &(info->info.global_conf));
9388                 break;
9389         default:
9390                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9391                                                         info->info_type);
9392                 ret = -EINVAL;
9393                 break;
9394         }
9395
9396         return ret;
9397 }
9398
9399 static int
9400 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9401 {
9402         int ret = 0;
9403
9404         if (!hw || !info) {
9405                 PMD_DRV_LOG(ERR, "Invalid pointer");
9406                 return -EFAULT;
9407         }
9408
9409         switch (info->info_type) {
9410         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9411                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
9412                 break;
9413         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9414                 ret = i40e_set_hash_filter_global_config(hw,
9415                                 &(info->info.global_conf));
9416                 break;
9417         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
9418                 ret = i40e_hash_filter_inset_select(hw,
9419                                                &(info->info.input_set_conf));
9420                 break;
9421
9422         default:
9423                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9424                                                         info->info_type);
9425                 ret = -EINVAL;
9426                 break;
9427         }
9428
9429         return ret;
9430 }
9431
9432 /* Operations for hash function */
9433 static int
9434 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
9435                       enum rte_filter_op filter_op,
9436                       void *arg)
9437 {
9438         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9439         int ret = 0;
9440
9441         switch (filter_op) {
9442         case RTE_ETH_FILTER_NOP:
9443                 break;
9444         case RTE_ETH_FILTER_GET:
9445                 ret = i40e_hash_filter_get(hw,
9446                         (struct rte_eth_hash_filter_info *)arg);
9447                 break;
9448         case RTE_ETH_FILTER_SET:
9449                 ret = i40e_hash_filter_set(hw,
9450                         (struct rte_eth_hash_filter_info *)arg);
9451                 break;
9452         default:
9453                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
9454                                                                 filter_op);
9455                 ret = -ENOTSUP;
9456                 break;
9457         }
9458
9459         return ret;
9460 }
9461
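/*
 * Usage sketch (hypothetical helper): enable symmetric hashing on a port
 * through the RTE_ETH_FILTER_HASH path dispatched above.
 */
static inline int
i40e_example_enable_sym_hash(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
        info.info.enable = 1;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}
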
9462 /* Convert ethertype filter structure */
9463 static int
9464 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9465                               struct i40e_ethertype_filter *filter)
9466 {
9467         rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
9468         filter->input.ether_type = input->ether_type;
9469         filter->flags = input->flags;
9470         filter->queue = input->queue;
9471
9472         return 0;
9473 }
9474
9475 /* Check if the ethertype filter exists in the SW list */
9476 struct i40e_ethertype_filter *
9477 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9478                                 const struct i40e_ethertype_filter_input *input)
9479 {
9480         int ret;
9481
9482         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9483         if (ret < 0)
9484                 return NULL;
9485
9486         return ethertype_rule->hash_map[ret];
9487 }
9488
9489 /* Add ethertype filter in SW list */
9490 static int
9491 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9492                                 struct i40e_ethertype_filter *filter)
9493 {
9494         struct i40e_ethertype_rule *rule = &pf->ethertype;
9495         int ret;
9496
9497         ret = rte_hash_add_key(rule->hash_table, &filter->input);
9498         if (ret < 0) {
9499                 PMD_DRV_LOG(ERR,
9500                             "Failed to insert ethertype filter"
9501                             " into hash table, error %d!",
9502                             ret);
9503                 return ret;
9504         }
9505         rule->hash_map[ret] = filter;
9506
9507         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9508
9509         return 0;
9510 }
9511
9512 /* Delete ethertype filter in SW list */
9513 int
9514 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9515                              struct i40e_ethertype_filter_input *input)
9516 {
9517         struct i40e_ethertype_rule *rule = &pf->ethertype;
9518         struct i40e_ethertype_filter *filter;
9519         int ret;
9520
9521         ret = rte_hash_del_key(rule->hash_table, input);
9522         if (ret < 0) {
9523                 PMD_DRV_LOG(ERR,
9524                             "Failed to delete ethertype filter"
9525                             " from hash table, error %d!",
9526                             ret);
9527                 return ret;
9528         }
9529         filter = rule->hash_map[ret];
9530         rule->hash_map[ret] = NULL;
9531
9532         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9533         rte_free(filter);
9534
9535         return 0;
9536 }
9537
9538 /*
9539  * Configure an ethertype filter, which can direct packets by filtering
9540  * on MAC address and ether_type, or on ether_type only
9541  */
9542 int
9543 i40e_ethertype_filter_set(struct i40e_pf *pf,
9544                         struct rte_eth_ethertype_filter *filter,
9545                         bool add)
9546 {
9547         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9548         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9549         struct i40e_ethertype_filter *ethertype_filter, *node;
9550         struct i40e_ethertype_filter check_filter;
9551         struct i40e_control_filter_stats stats;
9552         uint16_t flags = 0;
9553         int ret;
9554
9555         if (filter->queue >= pf->dev_data->nb_rx_queues) {
9556                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9557                 return -EINVAL;
9558         }
9559         if (filter->ether_type == ETHER_TYPE_IPv4 ||
9560                 filter->ether_type == ETHER_TYPE_IPv6) {
9561                 PMD_DRV_LOG(ERR,
9562                         "unsupported ether_type(0x%04x) in control packet filter.",
9563                         filter->ether_type);
9564                 return -EINVAL;
9565         }
9566         if (filter->ether_type == ETHER_TYPE_VLAN)
9567                 PMD_DRV_LOG(WARNING,
9568                         "filter vlan ether_type in first tag is not supported.");
9569
9570         /* Check if the filter is already in the SW list */
9571         memset(&check_filter, 0, sizeof(check_filter));
9572         i40e_ethertype_filter_convert(filter, &check_filter);
9573         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9574                                                &check_filter.input);
9575         if (add && node) {
9576                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9577                 return -EINVAL;
9578         }
9579
9580         if (!add && !node) {
9581                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9582                 return -EINVAL;
9583         }
9584
9585         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9586                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9587         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9588                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9589         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9590
9591         memset(&stats, 0, sizeof(stats));
9592         ret = i40e_aq_add_rem_control_packet_filter(hw,
9593                         filter->mac_addr.addr_bytes,
9594                         filter->ether_type, flags,
9595                         pf->main_vsi->seid,
9596                         filter->queue, add, &stats, NULL);
9597
9598         PMD_DRV_LOG(INFO,
9599                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9600                 ret, stats.mac_etype_used, stats.etype_used,
9601                 stats.mac_etype_free, stats.etype_free);
9602         if (ret < 0)
9603                 return -ENOSYS;
9604
9605         /* Add or delete a filter in SW list */
9606         if (add) {
9607                 ethertype_filter = rte_zmalloc("ethertype_filter",
9608                                        sizeof(*ethertype_filter), 0);
9609                 if (ethertype_filter == NULL) {
9610                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9611                         return -ENOMEM;
9612                 }
9613
9614                 rte_memcpy(ethertype_filter, &check_filter,
9615                            sizeof(check_filter));
9616                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9617                 if (ret < 0)
9618                         rte_free(ethertype_filter);
9619         } else {
9620                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9621         }
9622
9623         return ret;
9624 }
9625
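/*
 * Application-level sketch (hypothetical values): drop all ARP frames on
 * a port. The request flows through i40e_ethertype_filter_handle() below
 * into i40e_ethertype_filter_set() above.
 */
static inline int
i40e_example_drop_arp(uint16_t port_id)
{
        struct rte_eth_ethertype_filter filter;

        memset(&filter, 0, sizeof(filter));
        filter.ether_type = ETHER_TYPE_ARP;
        filter.flags = RTE_ETHTYPE_FLAGS_DROP; /* no MAC flag: match any MAC */
        filter.queue = 0;                      /* RX queue id */

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
                                       RTE_ETH_FILTER_ADD, &filter);
}
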
9626 /*
9627  * Handle operations for ethertype filter.
9628  */
9629 static int
9630 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
9631                                 enum rte_filter_op filter_op,
9632                                 void *arg)
9633 {
9634         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9635         int ret = 0;
9636
9637         if (filter_op == RTE_ETH_FILTER_NOP)
9638                 return ret;
9639
9640         if (arg == NULL) {
9641                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
9642                             filter_op);
9643                 return -EINVAL;
9644         }
9645
9646         switch (filter_op) {
9647         case RTE_ETH_FILTER_ADD:
9648                 ret = i40e_ethertype_filter_set(pf,
9649                         (struct rte_eth_ethertype_filter *)arg,
9650                         TRUE);
9651                 break;
9652         case RTE_ETH_FILTER_DELETE:
9653                 ret = i40e_ethertype_filter_set(pf,
9654                         (struct rte_eth_ethertype_filter *)arg,
9655                         FALSE);
9656                 break;
9657         default:
9658                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
9659                 ret = -ENOSYS;
9660                 break;
9661         }
9662         return ret;
9663 }
9664
9665 static int
9666 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
9667                      enum rte_filter_type filter_type,
9668                      enum rte_filter_op filter_op,
9669                      void *arg)
9670 {
9671         int ret = 0;
9672
9673         if (dev == NULL)
9674                 return -EINVAL;
9675
9676         switch (filter_type) {
9677         case RTE_ETH_FILTER_NONE:
9678                 /* For global configuration */
9679                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
9680                 break;
9681         case RTE_ETH_FILTER_HASH:
9682                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
9683                 break;
9684         case RTE_ETH_FILTER_MACVLAN:
9685                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
9686                 break;
9687         case RTE_ETH_FILTER_ETHERTYPE:
9688                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
9689                 break;
9690         case RTE_ETH_FILTER_TUNNEL:
9691                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
9692                 break;
9693         case RTE_ETH_FILTER_FDIR:
9694                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
9695                 break;
9696         case RTE_ETH_FILTER_GENERIC:
9697                 if (filter_op != RTE_ETH_FILTER_GET)
9698                         return -EINVAL;
9699                 *(const void **)arg = &i40e_flow_ops;
9700                 break;
9701         default:
9702                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9703                                                         filter_type);
9704                 ret = -EINVAL;
9705                 break;
9706         }
9707
9708         return ret;
9709 }
9710
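/*
 * Sketch of the RTE_ETH_FILTER_GENERIC contract implemented above: the
 * rte_flow layer obtains the driver's flow ops with a GET-only call
 * (hypothetical helper).
 */
static inline const void *
i40e_example_get_flow_ops(struct rte_eth_dev *dev)
{
        const void *ops = NULL;

        if (i40e_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
                                 RTE_ETH_FILTER_GET, &ops) < 0)
                return NULL;

        return ops; /* points to i40e_flow_ops on success */
}
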
9711 /*
9712  * Check and enable Extended Tag.
9713  * Enabling Extended Tag is important for 40G performance.
9714  */
9715 static void
9716 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9717 {
9718         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9719         uint32_t buf = 0;
9720         int ret;
9721
9722         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9723                                       PCI_DEV_CAP_REG);
9724         if (ret < 0) {
9725                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9726                             PCI_DEV_CAP_REG);
9727                 return;
9728         }
9729         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9730                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9731                 return;
9732         }
9733
9734         buf = 0;
9735         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9736                                       PCI_DEV_CTRL_REG);
9737         if (ret < 0) {
9738                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9739                             PCI_DEV_CTRL_REG);
9740                 return;
9741         }
9742         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9743                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9744                 return;
9745         }
9746         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9747         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9748                                        PCI_DEV_CTRL_REG);
9749         if (ret < 0) {
9750                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9751                             PCI_DEV_CTRL_REG);
9752                 return;
9753         }
9754 }
9755
9756 /*
9757  * As some registers are not reset unless a global hardware reset is
9758  * performed, hardware initialization is needed to put those registers
9759  * into an expected initial state.
9760  */
9761 static void
9762 i40e_hw_init(struct rte_eth_dev *dev)
9763 {
9764         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9765
9766         i40e_enable_extended_tag(dev);
9767
9768         /* clear the PF Queue Filter control register */
9769         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9770
9771         /* Disable symmetric hash per port */
9772         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9773 }
9774
9775 /*
9776  * On X722 it is possible to have multiple pctypes mapped to the same flowtype;
9777  * however, this function will return only the highest pctype index,
9778  * which is not quite correct. This is a known problem of the i40e driver
9779  * and needs to be fixed later.
9780  */
9781 enum i40e_filter_pctype
9782 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9783 {
9784         int i;
9785         uint64_t pctype_mask;
9786
9787         if (flow_type < I40E_FLOW_TYPE_MAX) {
9788                 pctype_mask = adapter->pctypes_tbl[flow_type];
9789                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9790                         if (pctype_mask & (1ULL << i))
9791                                 return (enum i40e_filter_pctype)i;
9792                 }
9793         }
9794         return I40E_FILTER_PCTYPE_INVALID;
9795 }
9796
9797 uint16_t
9798 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9799                         enum i40e_filter_pctype pctype)
9800 {
9801         uint16_t flowtype;
9802         uint64_t pctype_mask = 1ULL << pctype;
9803
9804         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9805              flowtype++) {
9806                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9807                         return flowtype;
9808         }
9809
9810         return RTE_ETH_FLOW_UNKNOWN;
9811 }
9812
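/*
 * Illustrative round trip (hypothetical helper): a flow type resolves to
 * its highest pctype, and that pctype resolves back to the same flow type
 * through adapter->pctypes_tbl.
 */
static inline int
i40e_example_check_mapping(const struct i40e_adapter *adapter)
{
        enum i40e_filter_pctype pctype;

        pctype = i40e_flowtype_to_pctype(adapter,
                                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP);
        if (pctype == I40E_FILTER_PCTYPE_INVALID)
                return -1;

        return i40e_pctype_to_flowtype(adapter, pctype) ==
               RTE_ETH_FLOW_NONFRAG_IPV4_UDP ? 0 : -1;
}
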
9813 /*
9814  * On X710, performance numbers fall far short of expectations on recent
9815  * firmware versions. The same is true on XL710 if promiscuous mode is
9816  * disabled, or if promiscuous mode is enabled and the port MAC address
9817  * equals the packet destination MAC address. The fix for this issue may
9818  * not be integrated in the following firmware version, so a workaround
9819  * in the software driver is needed. It modifies the initial values of 3
9820  * internal-only registers for both X710 and XL710. Note that the values
9821  * for X710 and XL710 could be different, and the workaround can be
9822  * removed when it is fixed in firmware in the future.
9823  */
9824
9825 /* For both X710 and XL710 */
9826 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
9827 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
9828 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
9829
9830 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9831 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
9832
9833 /* For X722 */
9834 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
9835 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
9836
9837 /* For X710 */
9838 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
9839 /* For XL710 */
9840 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
9841 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
9842
9843 static int
9844 i40e_dev_sync_phy_type(struct i40e_hw *hw)
9845 {
9846         enum i40e_status_code status;
9847         struct i40e_aq_get_phy_abilities_resp phy_ab;
9848         int ret = -ENOTSUP;
9849         int retries = 0;
9850
9851         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
9852                                               NULL);
9853
9854         while (status) {
9855                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
9856                         status);
9857                 retries++;
9858                 rte_delay_us(100000);
9859                 if (retries < 5)
9860                         status = i40e_aq_get_phy_capabilities(hw, false,
9861                                         true, &phy_ab, NULL);
9862                 else
9863                         return ret;
9864         }
9865         return 0;
9866 }
9867
9868 static void
9869 i40e_configure_registers(struct i40e_hw *hw)
9870 {
9871         static struct {
9872                 uint32_t addr;
9873                 uint64_t val;
9874         } reg_table[] = {
9875                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
9876                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
9877                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
9878         };
9879         uint64_t reg;
9880         uint32_t i;
9881         int ret;
9882
9883         for (i = 0; i < RTE_DIM(reg_table); i++) {
9884                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
9885                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9886                                 reg_table[i].val =
9887                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
9888                         else /* For X710/XL710/XXV710 */
9889                                 if (hw->aq.fw_maj_ver < 6)
9890                                         reg_table[i].val =
9891                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
9892                                 else
9893                                         reg_table[i].val =
9894                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
9895                 }
9896
9897                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
9898                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9899                                 reg_table[i].val =
9900                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9901                         else /* For X710/XL710/XXV710 */
9902                                 reg_table[i].val =
9903                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9904                 }
9905
9906                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
9907                         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
9908                             I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
9909                                 reg_table[i].val =
9910                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
9911                         else /* For X710 */
9912                                 reg_table[i].val =
9913                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
9914                 }
9915
9916                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
9917                                                         &reg, NULL);
9918                 if (ret < 0) {
9919                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
9920                                                         reg_table[i].addr);
9921                         break;
9922                 }
9923                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
9924                                                 reg_table[i].addr, reg);
9925                 if (reg == reg_table[i].val)
9926                         continue;
9927
9928                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
9929                                                 reg_table[i].val, NULL);
9930                 if (ret < 0) {
9931                         PMD_DRV_LOG(ERR,
9932                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
9933                                 reg_table[i].val, reg_table[i].addr);
9934                         break;
9935                 }
9936                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
9937                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
9938         }
9939 }
9940
9941 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
9942 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
9943 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
9944 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
9945 static int
9946 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
9947 {
9948         uint32_t reg;
9949         int ret;
9950
9951         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
9952                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
9953                 return -EINVAL;
9954         }
9955
9956         /* Configure for double VLAN RX stripping */
9957         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
9958         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
9959                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
9960                 ret = i40e_aq_debug_write_register(hw,
9961                                                    I40E_VSI_TSR(vsi->vsi_id),
9962                                                    reg, NULL);
9963                 if (ret < 0) {
9964                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
9965                                     vsi->vsi_id);
9966                         return I40E_ERR_CONFIG;
9967                 }
9968         }
9969
9970         /* Configure for double VLAN TX insertion */
9971         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
9972         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
9973                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
9974                 ret = i40e_aq_debug_write_register(hw,
9975                                                    I40E_VSI_L2TAGSTXVALID(
9976                                                    vsi->vsi_id), reg, NULL);
9977                 if (ret < 0) {
9978                         PMD_DRV_LOG(ERR,
9979                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
9980                                 vsi->vsi_id);
9981                         return I40E_ERR_CONFIG;
9982                 }
9983         }
9984
9985         return 0;
9986 }
9987
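/*
 * Application-level sketch: the QinQ register programming above takes
 * effect when extended (double) VLAN is enabled through the generic VLAN
 * offload API (hypothetical helper).
 */
static inline int
i40e_example_enable_qinq(uint16_t port_id)
{
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
                return mask;

        return rte_eth_dev_set_vlan_offload(port_id,
                                            mask | ETH_VLAN_EXTEND_OFFLOAD);
}
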
9988 /**
9989  * i40e_aq_add_mirror_rule
9990  * @hw: pointer to the hardware structure
9991  * @seid: VEB seid to add mirror rule to
9992  * @dst_id: destination VSI seid
9993  * @entries: buffer which contains the entities to be mirrored
9994  * @count: number of entities contained in the buffer
9995  * @rule_id: the rule_id of the rule to be added
9996  *
9997  * Add a mirror rule for a given VEB.
9998  *
9999  **/
10000 static enum i40e_status_code
10001 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10002                         uint16_t seid, uint16_t dst_id,
10003                         uint16_t rule_type, uint16_t *entries,
10004                         uint16_t count, uint16_t *rule_id)
10005 {
10006         struct i40e_aq_desc desc;
10007         struct i40e_aqc_add_delete_mirror_rule cmd;
10008         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10009                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
10010                 &desc.params.raw;
10011         uint16_t buff_len;
10012         enum i40e_status_code status;
10013
10014         i40e_fill_default_direct_cmd_desc(&desc,
10015                                           i40e_aqc_opc_add_mirror_rule);
10016         memset(&cmd, 0, sizeof(cmd));
10017
10018         buff_len = sizeof(uint16_t) * count;
10019         desc.datalen = rte_cpu_to_le_16(buff_len);
10020         if (buff_len > 0)
10021                 desc.flags |= rte_cpu_to_le_16(
10022                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10023         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10024                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10025         cmd.num_entries = rte_cpu_to_le_16(count);
10026         cmd.seid = rte_cpu_to_le_16(seid);
10027         cmd.destination = rte_cpu_to_le_16(dst_id);
10028
10029         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10030         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
10031         PMD_DRV_LOG(INFO,
10032                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u, mirror_rules_used = %u, mirror_rules_free = %u",
10033                 hw->aq.asq_last_status, resp->rule_id,
10034                 resp->mirror_rules_used, resp->mirror_rules_free);
10035         *rule_id = rte_le_to_cpu_16(resp->rule_id);
10036
10037         return status;
10038 }
10039
10040 /**
10041  * i40e_aq_del_mirror_rule
10042  * @hw: pointer to the hardware structure
10043  * @seid: VEB seid to delete the mirror rule from
10044  * @entries: buffer which contains the entities to be mirrored
10045  * @count: number of entities contained in the buffer
10046  * @rule_id: the rule_id of the rule to be deleted
10047  *
10048  * Delete a mirror rule for a given VEB.
10049  *
10050  **/
10051 static enum i40e_status_code
10052 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10053                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
10054                 uint16_t count, uint16_t rule_id)
10055 {
10056         struct i40e_aq_desc desc;
10057         struct i40e_aqc_add_delete_mirror_rule cmd;
10058         uint16_t buff_len = 0;
10059         enum i40e_status_code status;
10060         void *buff = NULL;
10061
10062         i40e_fill_default_direct_cmd_desc(&desc,
10063                                           i40e_aqc_opc_delete_mirror_rule);
10064         memset(&cmd, 0, sizeof(cmd));
10065         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10066                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10067                                                           I40E_AQ_FLAG_RD));
10068                 cmd.num_entries = count;
10069                 buff_len = sizeof(uint16_t) * count;
10070                 desc.datalen = rte_cpu_to_le_16(buff_len);
10071                 buff = (void *)entries;
10072         } else
10073                 /* rule id is filled in destination field for deleting mirror rule */
10074                 cmd.destination = rte_cpu_to_le_16(rule_id);
10075
10076         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10077                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10078         cmd.seid = rte_cpu_to_le_16(seid);
10079
10080         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10081         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10082
10083         return status;
10084 }
10085
10086 /**
10087  * i40e_mirror_rule_set
10088  * @dev: pointer to the device structure
10089  * @mirror_conf: mirror rule info
10090  * @sw_id: mirror rule's sw_id
10091  * @on: enable/disable
10092  *
10093  * Set a mirror rule.
10094  *
10095  **/
10096 static int
10097 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10098                         struct rte_eth_mirror_conf *mirror_conf,
10099                         uint8_t sw_id, uint8_t on)
10100 {
10101         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10102         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10103         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10104         struct i40e_mirror_rule *parent = NULL;
10105         uint16_t seid, dst_seid, rule_id;
10106         uint16_t i, j = 0;
10107         int ret;
10108
10109         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10110
10111         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10112                 PMD_DRV_LOG(ERR,
10113                         "mirror rule cannot be configured without a VEB or VFs.");
10114                 return -ENOSYS;
10115         }
10116         if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
10117                 PMD_DRV_LOG(ERR, "mirror table is full.");
10118                 return -ENOSPC;
10119         }
10120         if (mirror_conf->dst_pool > pf->vf_num) {
10121                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10122                                  mirror_conf->dst_pool);
10123                 return -EINVAL;
10124         }
10125
10126         seid = pf->main_vsi->veb->seid;
10127
10128         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10129                 if (sw_id <= it->index) {
10130                         mirr_rule = it;
10131                         break;
10132                 }
10133                 parent = it;
10134         }
10135         if (mirr_rule && sw_id == mirr_rule->index) {
10136                 if (on) {
10137                         PMD_DRV_LOG(ERR, "mirror rule exists.");
10138                         return -EEXIST;
10139                 } else {
10140                         ret = i40e_aq_del_mirror_rule(hw, seid,
10141                                         mirr_rule->rule_type,
10142                                         mirr_rule->entries,
10143                                         mirr_rule->num_entries, mirr_rule->id);
10144                         if (ret < 0) {
10145                                 PMD_DRV_LOG(ERR,
10146                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
10147                                         ret, hw->aq.asq_last_status);
10148                                 return -ENOSYS;
10149                         }
10150                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10151                         rte_free(mirr_rule);
10152                         pf->nb_mirror_rule--;
10153                         return 0;
10154                 }
10155         } else if (!on) {
10156                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10157                 return -ENOENT;
10158         }
10159
10160         mirr_rule = rte_zmalloc("i40e_mirror_rule",
10161                                 sizeof(struct i40e_mirror_rule), 0);
10162         if (!mirr_rule) {
10163                 PMD_DRV_LOG(ERR, "failed to allocate memory");
10164                 return I40E_ERR_NO_MEMORY;
10165         }
10166         switch (mirror_conf->rule_type) {
10167         case ETH_MIRROR_VLAN:
10168                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10169                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10170                                 mirr_rule->entries[j] =
10171                                         mirror_conf->vlan.vlan_id[i];
10172                                 j++;
10173                         }
10174                 }
10175                 if (j == 0) {
10176                         PMD_DRV_LOG(ERR, "vlan is not specified.");
10177                         rte_free(mirr_rule);
10178                         return -EINVAL;
10179                 }
10180                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10181                 break;
10182         case ETH_MIRROR_VIRTUAL_POOL_UP:
10183         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10184                 /* check if the specified pool bit is out of range */
10185                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
10186                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
10187                         rte_free(mirr_rule);
10188                         return -EINVAL;
10189                 }
10190                 for (i = 0, j = 0; i < pf->vf_num; i++) {
10191                         if (mirror_conf->pool_mask & (1ULL << i)) {
10192                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10193                                 j++;
10194                         }
10195                 }
10196                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10197                         /* add pf vsi to entries */
10198                         mirr_rule->entries[j] = pf->main_vsi_seid;
10199                         j++;
10200                 }
10201                 if (j == 0) {
10202                         PMD_DRV_LOG(ERR, "pool is not specified.");
10203                         rte_free(mirr_rule);
10204                         return -EINVAL;
10205                 }
10206                 /* Egress and ingress in AQ commands mean from the switch, not the port */
10207                 mirr_rule->rule_type =
10208                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10209                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10210                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10211                 break;
10212         case ETH_MIRROR_UPLINK_PORT:
10213                 /* Egress and ingress in AQ commands mean from the switch, not the port */
10214                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10215                 break;
10216         case ETH_MIRROR_DOWNLINK_PORT:
10217                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10218                 break;
10219         default:
10220                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10221                         mirror_conf->rule_type);
10222                 rte_free(mirr_rule);
10223                 return -EINVAL;
10224         }
10225
10226         /* If the dst_pool is equal to vf_num, consider it as PF */
10227         if (mirror_conf->dst_pool == pf->vf_num)
10228                 dst_seid = pf->main_vsi_seid;
10229         else
10230                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10231
10232         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10233                                       mirr_rule->rule_type, mirr_rule->entries,
10234                                       j, &rule_id);
10235         if (ret < 0) {
10236                 PMD_DRV_LOG(ERR,
10237                         "failed to add mirror rule: ret = %d, aq_err = %d.",
10238                         ret, hw->aq.asq_last_status);
10239                 rte_free(mirr_rule);
10240                 return -ENOSYS;
10241         }
10242
10243         mirr_rule->index = sw_id;
10244         mirr_rule->num_entries = j;
10245         mirr_rule->id = rule_id;
10246         mirr_rule->dst_vsi_seid = dst_seid;
10247
10248         if (parent)
10249                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10250         else
10251                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10252
10253         pf->nb_mirror_rule++;
10254         return 0;
10255 }
10256
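/*
 * Application-level sketch (hypothetical rule id and VLAN): mirror VLAN 100
 * traffic to pool 0, then remove the rule again. rte_eth_mirror_rule_set()
 * and rte_eth_mirror_rule_reset() land in the PMD callbacks in this file.
 */
static inline int
i40e_example_mirror_vlan(uint16_t port_id)
{
        struct rte_eth_mirror_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));
        conf.rule_type = ETH_MIRROR_VLAN;
        conf.dst_pool = 0;          /* destination pool (VF 0) */
        conf.vlan.vlan_mask = 1ULL; /* one valid vlan_id entry... */
        conf.vlan.vlan_id[0] = 100; /* ...mirroring VLAN 100 */

        ret = rte_eth_mirror_rule_set(port_id, &conf, 0 /* sw_id */, 1 /* on */);
        if (ret < 0)
                return ret;

        return rte_eth_mirror_rule_reset(port_id, 0 /* sw_id */);
}
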
10257 /**
10258  * i40e_mirror_rule_reset
10259  * @dev: pointer to the device
10260  * @sw_id: mirror rule's sw_id
10261  *
10262  * Reset a mirror rule.
10263  *
10264  **/
10265 static int
10266 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10267 {
10268         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10269         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10270         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10271         uint16_t seid;
10272         int ret;
10273
10274         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10275
10276         seid = pf->main_vsi->veb->seid;
10277
10278         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10279                 if (sw_id == it->index) {
10280                         mirr_rule = it;
10281                         break;
10282                 }
10283         }
10284         if (mirr_rule) {
10285                 ret = i40e_aq_del_mirror_rule(hw, seid,
10286                                 mirr_rule->rule_type,
10287                                 mirr_rule->entries,
10288                                 mirr_rule->num_entries, mirr_rule->id);
10289                 if (ret < 0) {
10290                         PMD_DRV_LOG(ERR,
10291                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
10292                                 ret, hw->aq.asq_last_status);
10293                         return -ENOSYS;
10294                 }
10295                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10296                 rte_free(mirr_rule);
10297                 pf->nb_mirror_rule--;
10298         } else {
10299                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10300                 return -ENOENT;
10301         }
10302         return 0;
10303 }
10304
10305 static uint64_t
10306 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10307 {
10308         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10309         uint64_t systim_cycles;
10310
10311         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10312         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10313                         << 32;
10314
10315         return systim_cycles;
10316 }
10317
10318 static uint64_t
10319 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10320 {
10321         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10322         uint64_t rx_tstamp;
10323
10324         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10325         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10326                         << 32;
10327
10328         return rx_tstamp;
10329 }
10330
10331 static uint64_t
10332 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10333 {
10334         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10335         uint64_t tx_tstamp;
10336
10337         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10338         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10339                         << 32;
10340
10341         return tx_tstamp;
10342 }
10343
10344 static void
10345 i40e_start_timecounters(struct rte_eth_dev *dev)
10346 {
10347         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10348         struct i40e_adapter *adapter =
10349                         (struct i40e_adapter *)dev->data->dev_private;
10350         struct rte_eth_link link;
10351         uint32_t tsync_inc_l;
10352         uint32_t tsync_inc_h;
10353
10354         /* Get current link speed. */
10355         i40e_dev_link_update(dev, 1);
10356         rte_eth_linkstatus_get(dev, &link);
10357
10358         switch (link.link_speed) {
10359         case ETH_SPEED_NUM_40G:
10360                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10361                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10362                 break;
10363         case ETH_SPEED_NUM_10G:
10364                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10365                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10366                 break;
10367         case ETH_SPEED_NUM_1G:
10368                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10369                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10370                 break;
10371         default:
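                      /* Unknown or unsupported link speed: leave the
                       * increment at zero, which stops the hardware clock
                       * from advancing until the link is updated.
                       */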
10372                 tsync_inc_l = 0x0;
10373                 tsync_inc_h = 0x0;
10374         }
10375
10376         /* Set the timesync increment value. */
10377         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10378         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10379
10380         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10381         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10382         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10383
10384         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10385         adapter->systime_tc.cc_shift = 0;
10386         adapter->systime_tc.nsec_mask = 0;
10387
10388         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10389         adapter->rx_tstamp_tc.cc_shift = 0;
10390         adapter->rx_tstamp_tc.nsec_mask = 0;
10391
10392         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10393         adapter->tx_tstamp_tc.cc_shift = 0;
10394         adapter->tx_tstamp_tc.nsec_mask = 0;
10395 }
10396
10397 static int
10398 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10399 {
10400         struct i40e_adapter *adapter =
10401                         (struct i40e_adapter *)dev->data->dev_private;
10402
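              /* Only the software timecounters are adjusted here; the
               * hardware cycle counter keeps running untouched.
               */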
10403         adapter->systime_tc.nsec += delta;
10404         adapter->rx_tstamp_tc.nsec += delta;
10405         adapter->tx_tstamp_tc.nsec += delta;
10406
10407         return 0;
10408 }
10409
10410 static int
10411 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10412 {
10413         uint64_t ns;
10414         struct i40e_adapter *adapter =
10415                         (struct i40e_adapter *)dev->data->dev_private;
10416
10417         ns = rte_timespec_to_ns(ts);
10418
10419         /* Set the timecounters to a new value. */
10420         adapter->systime_tc.nsec = ns;
10421         adapter->rx_tstamp_tc.nsec = ns;
10422         adapter->tx_tstamp_tc.nsec = ns;
10423
10424         return 0;
10425 }
10426
10427 static int
10428 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10429 {
10430         uint64_t ns, systime_cycles;
10431         struct i40e_adapter *adapter =
10432                         (struct i40e_adapter *)dev->data->dev_private;
10433
10434         systime_cycles = i40e_read_systime_cyclecounter(dev);
10435         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10436         *ts = rte_ns_to_timespec(ns);
10437
10438         return 0;
10439 }
10440
10441 static int
10442 i40e_timesync_enable(struct rte_eth_dev *dev)
10443 {
10444         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10445         uint32_t tsync_ctl_l;
10446         uint32_t tsync_ctl_h;
10447
10448         /* Stop the timesync system time. */
10449         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10450         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10451         /* Reset the timesync system time value. */
10452         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10453         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10454
10455         i40e_start_timecounters(dev);
10456
10457         /* Clear timesync registers. */
10458         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10459         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10460         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10461         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10462         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10463         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10464
10465         /* Enable timestamping of PTP packets. */
10466         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10467         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10468
10469         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10470         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10471         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10472
10473         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10474         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10475
10476         return 0;
10477 }
10478
10479 static int
10480 i40e_timesync_disable(struct rte_eth_dev *dev)
10481 {
10482         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10483         uint32_t tsync_ctl_l;
10484         uint32_t tsync_ctl_h;
10485
10486         /* Disable timestamping of PTP packets. */
10487         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10488         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10489
10490         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10491         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10492
10493         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10494         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10495
10496         /* Reset the timesync increment value. */
10497         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10498         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10499
10500         return 0;
10501 }
10502
10503 static int
10504 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10505                                 struct timespec *timestamp, uint32_t flags)
10506 {
10507         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10508         struct i40e_adapter *adapter =
10509                 (struct i40e_adapter *)dev->data->dev_private;
10510
10511         uint32_t sync_status;
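              /* The hardware keeps four Rx timestamp register pairs; the
               * low two bits of 'flags' select which one to read.
               */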
10512         uint32_t index = flags & 0x03;
10513         uint64_t rx_tstamp_cycles;
10514         uint64_t ns;
10515
10516         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10517         if ((sync_status & (1 << index)) == 0)
10518                 return -EINVAL;
10519
10520         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10521         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10522         *timestamp = rte_ns_to_timespec(ns);
10523
10524         return 0;
10525 }
10526
10527 static int
10528 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10529                                 struct timespec *timestamp)
10530 {
10531         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10532         struct i40e_adapter *adapter =
10533                 (struct i40e_adapter *)dev->data->dev_private;
10534
10535         uint32_t sync_status;
10536         uint64_t tx_tstamp_cycles;
10537         uint64_t ns;
10538
10539         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10540         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10541                 return -EINVAL;
10542
10543         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10544         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10545         *timestamp = rte_ns_to_timespec(ns);
10546
10547         return 0;
10548 }
10549
10550 /*
10551  * i40e_parse_dcb_configure - parse the DCB configuration from the user
10552  * @dev: the device being configured
10553  * @dcb_cfg: pointer to the parsed result
10554  * @tc_map: bitmap of enabled traffic classes
10555  *
10556  * Returns 0 on success, negative value on failure
10557  */
10558 static int
10559 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10560                          struct i40e_dcbx_config *dcb_cfg,
10561                          uint8_t *tc_map)
10562 {
10563         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10564         uint8_t i, tc_bw, bw_lf;
10565
10566         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10567
10568         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10569         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10570                 PMD_INIT_LOG(ERR, "number of TCs exceeds the maximum.");
10571                 return -EINVAL;
10572         }
10573
10574         /* assume each tc has the same bw */
10575         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10576         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10577                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10578         /* distribute the remainder so that the TC bandwidths sum to 100 */
10579         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10580         for (i = 0; i < bw_lf; i++)
10581                 dcb_cfg->etscfg.tcbwtable[i]++;
10582
10583         /* assume each tc has the same Transmission Selection Algorithm */
10584         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10585                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10586
10587         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10588                 dcb_cfg->etscfg.prioritytable[i] =
10589                                 dcb_rx_conf->dcb_tc[i];
10590
10591         /* FW needs one App to configure HW */
10592         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10593         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10594         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10595         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10596
10597         if (dcb_rx_conf->nb_tcs == 0)
10598                 *tc_map = 1; /* tc0 only */
10599         else
10600                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10601
10602         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10603                 dcb_cfg->pfc.willing = 0;
10604                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10605                 dcb_cfg->pfc.pfcenable = *tc_map;
10606         }
10607         return 0;
10608 }
10609
10610
10611 static enum i40e_status_code
10612 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10613                               struct i40e_aqc_vsi_properties_data *info,
10614                               uint8_t enabled_tcmap)
10615 {
10616         enum i40e_status_code ret;
10617         int i, total_tc = 0;
10618         uint16_t qpnum_per_tc, bsf, qp_idx;
10619         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10620         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10621         uint16_t used_queues;
10622
10623         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10624         if (ret != I40E_SUCCESS)
10625                 return ret;
10626
10627         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10628                 if (enabled_tcmap & (1 << i))
10629                         total_tc++;
10630         }
10631         if (total_tc == 0)
10632                 total_tc = 1;
10633         vsi->enabled_tc = enabled_tcmap;
10634
10635         /* Different VSI types have different numbers of queues assigned */
10636         if (vsi->type == I40E_VSI_MAIN)
10637                 used_queues = dev_data->nb_rx_queues -
10638                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10639         else if (vsi->type == I40E_VSI_VMDQ2)
10640                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10641         else {
10642                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10643                 return I40E_ERR_NO_AVAILABLE_VSI;
10644         }
10645
10646         /* Number of queues per enabled TC */
10647         qpnum_per_tc = used_queues / total_tc;
10648         if (qpnum_per_tc == 0) {
10649                 PMD_INIT_LOG(ERR, "number of queues is less than the number of TCs.");
10650                 return I40E_ERR_INVALID_QP_ID;
10651         }
10652         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10653                                 I40E_MAX_Q_PER_TC);
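              /* qpnum_per_tc is now a power of two; bsf below is its log2,
               * which is what the AQ tc_mapping queue-number field encodes.
               */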
10654         bsf = rte_bsf32(qpnum_per_tc);
10655
10656         /**
10657          * Configure TC and queue mapping parameters. For an enabled TC,
10658          * allocate qpnum_per_tc queues to it; disabled TCs are served by
10659          * the default queue.
10660          */
10661         qp_idx = 0;
10662         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10663                 if (vsi->enabled_tc & (1 << i)) {
10664                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10665                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10666                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10667                         qp_idx += qpnum_per_tc;
10668                 } else
10669                         info->tc_mapping[i] = 0;
10670         }
10671
10672         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
10673         if (vsi->type == I40E_VSI_SRIOV) {
10674                 info->mapping_flags |=
10675                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10676                 for (i = 0; i < vsi->nb_qps; i++)
10677                         info->queue_mapping[i] =
10678                                 rte_cpu_to_le_16(vsi->base_queue + i);
10679         } else {
10680                 info->mapping_flags |=
10681                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10682                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10683         }
10684         info->valid_sections |=
10685                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10686
10687         return I40E_SUCCESS;
10688 }
10689
10690 /*
10691  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10692  * @veb: VEB to be configured
10693  * @tc_map: enabled TC bitmap
10694  *
10695  * Returns 0 on success, negative value on failure
10696  */
10697 static enum i40e_status_code
10698 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10699 {
10700         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10701         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10702         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10703         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10704         enum i40e_status_code ret = I40E_SUCCESS;
10705         int i;
10706         uint32_t bw_max;
10707
10708         /* Nothing to do if the requested TC map matches the one already enabled */
10709         if (veb->enabled_tc == tc_map)
10710                 return ret;
10711
10712         /* configure tc bandwidth */
10713         memset(&veb_bw, 0, sizeof(veb_bw));
10714         veb_bw.tc_valid_bits = tc_map;
10715         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10716         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10717                 if (tc_map & BIT_ULL(i))
10718                         veb_bw.tc_bw_share_credits[i] = 1;
10719         }
10720         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10721                                                    &veb_bw, NULL);
10722         if (ret) {
10723                 PMD_INIT_LOG(ERR,
10724                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10725                         hw->aq.asq_last_status);
10726                 return ret;
10727         }
10728
10729         memset(&ets_query, 0, sizeof(ets_query));
10730         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10731                                                    &ets_query, NULL);
10732         if (ret != I40E_SUCCESS) {
10733                 PMD_DRV_LOG(ERR,
10734                         "Failed to get switch_comp ETS configuration %u",
10735                         hw->aq.asq_last_status);
10736                 return ret;
10737         }
10738         memset(&bw_query, 0, sizeof(bw_query));
10739         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10740                                                   &bw_query, NULL);
10741         if (ret != I40E_SUCCESS) {
10742                 PMD_DRV_LOG(ERR,
10743                         "Failed to get switch_comp bandwidth configuration %u",
10744                         hw->aq.asq_last_status);
10745                 return ret;
10746         }
10747
10748         /* store and print out BW info */
10749         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10750         veb->bw_info.bw_max = ets_query.tc_bw_max;
10751         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10752         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
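              /* tc_bw_max is reported as two little-endian 16-bit words;
               * merge them into one 32-bit value packing 4 bits per TC.
               */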
10753         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10754                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10755                      I40E_16_BIT_WIDTH);
10756         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10757                 veb->bw_info.bw_ets_share_credits[i] =
10758                                 bw_query.tc_bw_share_credits[i];
10759                 veb->bw_info.bw_ets_credits[i] =
10760                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10761                 /* 4 bits per TC, 4th bit is reserved */
10762                 veb->bw_info.bw_ets_max[i] =
10763                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10764                                   RTE_LEN2MASK(3, uint8_t));
10765                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10766                             veb->bw_info.bw_ets_share_credits[i]);
10767                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10768                             veb->bw_info.bw_ets_credits[i]);
10769                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10770                             veb->bw_info.bw_ets_max[i]);
10771         }
10772
10773         veb->enabled_tc = tc_map;
10774
10775         return ret;
10776 }
10777
10778
10779 /*
10780  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10781  * @vsi: VSI to be configured
10782  * @tc_map: enabled TC bitmap
10783  *
10784  * Returns 0 on success, negative value on failure
10785  */
10786 static enum i40e_status_code
10787 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10788 {
10789         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10790         struct i40e_vsi_context ctxt;
10791         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10792         enum i40e_status_code ret = I40E_SUCCESS;
10793         int i;
10794
10795         /* Nothing to do if the requested TC map matches the one already enabled */
10796         if (vsi->enabled_tc == tc_map)
10797                 return ret;
10798
10799         /* configure tc bandwidth */
10800         memset(&bw_data, 0, sizeof(bw_data));
10801         bw_data.tc_valid_bits = tc_map;
10802         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10803         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10804                 if (tc_map & BIT_ULL(i))
10805                         bw_data.tc_bw_credits[i] = 1;
10806         }
10807         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10808         if (ret) {
10809                 PMD_INIT_LOG(ERR,
10810                         "AQ command Config VSI BW allocation per TC failed = %d",
10811                         hw->aq.asq_last_status);
10812                 goto out;
10813         }
10814         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10815                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10816
10817         /* Update Queue Pairs Mapping for currently enabled UPs */
10818         ctxt.seid = vsi->seid;
10819         ctxt.pf_num = hw->pf_id;
10820         ctxt.vf_num = 0;
10821         ctxt.uplink_seid = vsi->uplink_seid;
10822         ctxt.info = vsi->info;
10823         i40e_get_cap(hw);
10824         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10825         if (ret)
10826                 goto out;
10827
10828         /* Update the VSI after updating the VSI queue-mapping information */
10829         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10830         if (ret) {
10831                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10832                         hw->aq.asq_last_status);
10833                 goto out;
10834         }
10835         /* update the local VSI info with updated queue map */
10836         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10837                                         sizeof(vsi->info.tc_mapping));
10838         rte_memcpy(&vsi->info.queue_mapping,
10839                         &ctxt.info.queue_mapping,
10840                 sizeof(vsi->info.queue_mapping));
10841         vsi->info.mapping_flags = ctxt.info.mapping_flags;
10842         vsi->info.valid_sections = 0;
10843
10844         /* query and update current VSI BW information */
10845         ret = i40e_vsi_get_bw_config(vsi);
10846         if (ret) {
10847                 PMD_INIT_LOG(ERR,
10848                          "Failed updating vsi bw info, err %s aq_err %s",
10849                          i40e_stat_str(hw, ret),
10850                          i40e_aq_str(hw, hw->aq.asq_last_status));
10851                 goto out;
10852         }
10853
10854         vsi->enabled_tc = tc_map;
10855
10856 out:
10857         return ret;
10858 }
10859
10860 /*
10861  * i40e_dcb_hw_configure - program the dcb setting to hw
10862  * @pf: PF on which the configuration is applied
10863  * @new_cfg: new configuration
10864  * @tc_map: enabled TC bitmap
10865  *
10866  * Returns 0 on success, negative value on failure
10867  */
10868 static enum i40e_status_code
10869 i40e_dcb_hw_configure(struct i40e_pf *pf,
10870                       struct i40e_dcbx_config *new_cfg,
10871                       uint8_t tc_map)
10872 {
10873         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10874         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
10875         struct i40e_vsi *main_vsi = pf->main_vsi;
10876         struct i40e_vsi_list *vsi_list;
10877         enum i40e_status_code ret;
10878         int i;
10879         uint32_t val;
10880
10881         /* Use the FW API only if the FW version is >= v4.4 */
10882         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
10883               (hw->aq.fw_maj_ver >= 5))) {
10884                 PMD_INIT_LOG(ERR,
10885                         "FW < v4.4, cannot use FW LLDP API to configure DCB");
10886                 return I40E_ERR_FIRMWARE_API_VERSION;
10887         }
10888
10889         /* Check whether reconfiguration is needed */
10890         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
10891                 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
10892                 return I40E_SUCCESS;
10893         }
10894
10895         /* Copy the new config to the current config */
10896         *old_cfg = *new_cfg;
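              /* Mirror the operational ETS configuration into the ETS
               * recommendation TLV.
               */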
10897         old_cfg->etsrec = old_cfg->etscfg;
10898         ret = i40e_set_dcb_config(hw);
10899         if (ret) {
10900                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
10901                          i40e_stat_str(hw, ret),
10902                          i40e_aq_str(hw, hw->aq.asq_last_status));
10903                 return ret;
10904         }
10905         /* Set the receive arbiter to RR mode and the ETS scheme by default */
10906         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
10907                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
10908                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
10909                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
10910                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
10911                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
10912                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
10913                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
10914                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
10915                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
10916                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
10917                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
10918                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
10919         }
10920         /* get local mib to check whether it is configured correctly */
10921         /* IEEE mode */
10922         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
10923         /* Get Local DCB Config */
10924         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
10925                                      &hw->local_dcbx_config);
10926
10927         /* If a VEB has been created, update its TC configuration first */
10928         if (main_vsi->veb) {
10929                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
10930                 if (ret)
10931                         PMD_INIT_LOG(WARNING,
10932                                  "Failed configuring TC for VEB seid=%d",
10933                                  main_vsi->veb->seid);
10934         }
10935         /* Update each VSI */
10936         i40e_vsi_config_tc(main_vsi, tc_map);
10937         if (main_vsi->veb) {
10938                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
10939                         /* Apart from the main VSI and the VMDQ VSIs,
10940                          * only enable the default TC for other VSIs
10941                          */
10942                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
10943                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10944                                                          tc_map);
10945                         else
10946                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10947                                                          I40E_DEFAULT_TCMAP);
10948                         if (ret)
10949                                 PMD_INIT_LOG(WARNING,
10950                                         "Failed configuring TC for VSI seid=%d",
10951                                         vsi_list->vsi->seid);
10952                         /* Continue with the remaining VSIs even on failure */
10953                 }
10954         }
10955         return I40E_SUCCESS;
10956 }
10957
10958 /*
10959  * i40e_dcb_init_configure - initial dcb config
10960  * @dev: device being configured
10961  * @sw_dcb: whether DCB is configured in software or offloaded to hardware
10962  *
10963  * Returns 0 on success, negative value on failure
10964  */
10965 int
10966 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
10967 {
10968         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10969         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10970         int i, ret = 0;
10971
10972         if ((pf->flags & I40E_FLAG_DCB) == 0) {
10973                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10974                 return -ENOTSUP;
10975         }
10976
10977         /* DCB initialization:
10978          * Update DCB configuration from the Firmware and configure
10979          * LLDP MIB change event.
10980          */
10981         if (sw_dcb == TRUE) {
10982                 ret = i40e_init_dcb(hw);
10983                 /* If the LLDP agent is stopped, i40e_init_dcb is
10984                  * expected to fail with an I40E_AQ_RC_EPERM adminq
10985                  * status. Otherwise, it should return success.
10986                  */
10987                 if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
10988                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
10989                         memset(&hw->local_dcbx_config, 0,
10990                                 sizeof(struct i40e_dcbx_config));
10991                         /* set dcb default configuration */
10992                         hw->local_dcbx_config.etscfg.willing = 0;
10993                         hw->local_dcbx_config.etscfg.maxtcs = 0;
10994                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
10995                         hw->local_dcbx_config.etscfg.tsatable[0] =
10996                                                 I40E_IEEE_TSA_ETS;
10997                         /* all UPs mapping to TC0 */
10998                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10999                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11000                         hw->local_dcbx_config.etsrec =
11001                                 hw->local_dcbx_config.etscfg;
11002                         hw->local_dcbx_config.pfc.willing = 0;
11003                         hw->local_dcbx_config.pfc.pfccap =
11004                                                 I40E_MAX_TRAFFIC_CLASS;
11005                         /* FW needs one App to configure HW */
11006                         hw->local_dcbx_config.numapps = 1;
11007                         hw->local_dcbx_config.app[0].selector =
11008                                                 I40E_APP_SEL_ETHTYPE;
11009                         hw->local_dcbx_config.app[0].priority = 3;
11010                         hw->local_dcbx_config.app[0].protocolid =
11011                                                 I40E_APP_PROTOID_FCOE;
11012                         ret = i40e_set_dcb_config(hw);
11013                         if (ret) {
11014                                 PMD_INIT_LOG(ERR,
11015                                         "default DCB config failed, err = %d, aq_err = %d.",
11016                                         ret, hw->aq.asq_last_status);
11017                                 return -ENOSYS;
11018                         }
11019                 } else {
11020                         PMD_INIT_LOG(ERR,
11021                                 "DCB initialization in FW failed, err = %d, aq_err = %d.",
11022                                 ret, hw->aq.asq_last_status);
11023                         return -ENOTSUP;
11024                 }
11025         } else {
11026                 ret = i40e_aq_start_lldp(hw, NULL);
11027                 if (ret != I40E_SUCCESS)
11028                         PMD_INIT_LOG(DEBUG, "Failed to start LLDP");
11029
11030                 ret = i40e_init_dcb(hw);
11031                 if (!ret) {
11032                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11033                                 PMD_INIT_LOG(ERR,
11034                                         "HW doesn't support DCBX offload.");
11035                                 return -ENOTSUP;
11036                         }
11037                 } else {
11038                         PMD_INIT_LOG(ERR,
11039                                 "DCBX configuration failed, err = %d, aq_err = %d.",
11040                                 ret, hw->aq.asq_last_status);
11041                         return -ENOTSUP;
11042                 }
11043         }
11044         return 0;
11045 }
11046
11047 /*
11048  * i40e_dcb_setup - setup dcb related config
11049  * @dev: device being configured
11050  *
11051  * Returns 0 on success, negative value on failure
11052  */
11053 static int
11054 i40e_dcb_setup(struct rte_eth_dev *dev)
11055 {
11056         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11057         struct i40e_dcbx_config dcb_cfg;
11058         uint8_t tc_map = 0;
11059         int ret = 0;
11060
11061         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11062                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11063                 return -ENOTSUP;
11064         }
11065
11066         if (pf->vf_num != 0)
11067                 PMD_INIT_LOG(DEBUG, "DCB only works on the PF and VMDQ VSIs.");
11068
11069         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11070         if (ret) {
11071                 PMD_INIT_LOG(ERR, "invalid dcb config");
11072                 return -EINVAL;
11073         }
11074         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11075         if (ret) {
11076                 PMD_INIT_LOG(ERR, "DCB software configuration failed");
11077                 return -ENOSYS;
11078         }
11079
11080         return 0;
11081 }
11082
11083 static int
11084 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11085                       struct rte_eth_dcb_info *dcb_info)
11086 {
11087         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11088         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11089         struct i40e_vsi *vsi = pf->main_vsi;
11090         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11091         uint16_t bsf, tc_mapping;
11092         int i, j = 0;
11093
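              /* For a contiguous TC bitmap (2^n - 1), enabled_tc + 1 is a
               * power of two, so rte_bsf32() yields the TC count n.
               */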
11094         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11095                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11096         else
11097                 dcb_info->nb_tcs = 1;
11098         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11099                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11100         for (i = 0; i < dcb_info->nb_tcs; i++)
11101                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11102
11103         /* get queue mapping if vmdq is disabled */
11104         if (!pf->nb_cfg_vmdq_vsi) {
11105                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11106                         if (!(vsi->enabled_tc & (1 << i)))
11107                                 continue;
11108                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11109                         dcb_info->tc_queue.tc_rxq[j][i].base =
11110                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11111                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11112                         dcb_info->tc_queue.tc_txq[j][i].base =
11113                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11114                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11115                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11116                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11117                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11118                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11119                 }
11120                 return 0;
11121         }
11122
11123         /* get queue mapping if vmdq is enabled */
11124         do {
11125                 vsi = pf->vmdq[j].vsi;
11126                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11127                         if (!(vsi->enabled_tc & (1 << i)))
11128                                 continue;
11129                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11130                         dcb_info->tc_queue.tc_rxq[j][i].base =
11131                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11132                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11133                         dcb_info->tc_queue.tc_txq[j][i].base =
11134                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11135                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11136                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11137                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11138                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11139                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11140                 }
11141                 j++;
11142         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11143         return 0;
11144 }
11145
11146 static int
11147 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11148 {
11149         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11150         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11151         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11152         uint16_t msix_intr;
11153
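              /* I40E_MISC_VEC_ID is the shared vector (also used for admin
               * events) and is controlled via DYN_CTL0; Rx queue vectors
               * start at I40E_RX_VEC_START and use per-vector DYN_CTLN.
               */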
11154         msix_intr = intr_handle->intr_vec[queue_id];
11155         if (msix_intr == I40E_MISC_VEC_ID)
11156                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11157                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
11158                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11159                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11160         else
11161                 I40E_WRITE_REG(hw,
11162                                I40E_PFINT_DYN_CTLN(msix_intr -
11163                                                    I40E_RX_VEC_START),
11164                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
11165                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11166                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11167
11168         I40E_WRITE_FLUSH(hw);
11169         rte_intr_enable(&pci_dev->intr_handle);
11170
11171         return 0;
11172 }
11173
11174 static int
11175 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11176 {
11177         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11178         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11179         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11180         uint16_t msix_intr;
11181
11182         msix_intr = intr_handle->intr_vec[queue_id];
11183         if (msix_intr == I40E_MISC_VEC_ID)
11184                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11185                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11186         else
11187                 I40E_WRITE_REG(hw,
11188                                I40E_PFINT_DYN_CTLN(msix_intr -
11189                                                    I40E_RX_VEC_START),
11190                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11191         I40E_WRITE_FLUSH(hw);
11192
11193         return 0;
11194 }
11195
11196 static int i40e_get_regs(struct rte_eth_dev *dev,
11197                          struct rte_dev_reg_info *regs)
11198 {
11199         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11200         uint32_t *ptr_data = regs->data;
11201         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11202         const struct i40e_reg_info *reg_info;
11203
11204         if (ptr_data == NULL) {
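                      /* No data buffer supplied: the caller is only querying
                       * the required snapshot length and register width.
                       */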
11205                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11206                 regs->width = sizeof(uint32_t);
11207                 return 0;
11208         }
11209
11210         /* The first few registers have to be read using AQ operations */
11211         reg_idx = 0;
11212         while (i40e_regs_adminq[reg_idx].name) {
11213                 reg_info = &i40e_regs_adminq[reg_idx++];
11214                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11215                         for (arr_idx2 = 0;
11216                                         arr_idx2 <= reg_info->count2;
11217                                         arr_idx2++) {
11218                                 reg_offset = arr_idx * reg_info->stride1 +
11219                                         arr_idx2 * reg_info->stride2;
11220                                 reg_offset += reg_info->base_addr;
11221                                 ptr_data[reg_offset >> 2] =
11222                                         i40e_read_rx_ctl(hw, reg_offset);
11223                         }
11224         }
11225
11226         /* The remaining registers can be read using primitives */
11227         reg_idx = 0;
11228         while (i40e_regs_others[reg_idx].name) {
11229                 reg_info = &i40e_regs_others[reg_idx++];
11230                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11231                         for (arr_idx2 = 0;
11232                                         arr_idx2 <= reg_info->count2;
11233                                         arr_idx2++) {
11234                                 reg_offset = arr_idx * reg_info->stride1 +
11235                                         arr_idx2 * reg_info->stride2;
11236                                 reg_offset += reg_info->base_addr;
11237                                 ptr_data[reg_offset >> 2] =
11238                                         I40E_READ_REG(hw, reg_offset);
11239                         }
11240         }
11241
11242         return 0;
11243 }
11244
11245 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11246 {
11247         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11248
11249         /* Convert word count to byte count */
11250         return hw->nvm.sr_size << 1;
11251 }
11252
11253 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11254                            struct rte_dev_eeprom_info *eeprom)
11255 {
11256         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11257         uint16_t *data = eeprom->data;
11258         uint16_t offset, length, cnt_words;
11259         int ret_code;
11260
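              /* The NVM shadow RAM is addressed in 16-bit words; convert
               * the byte-based offset and length into words.
               */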
11261         offset = eeprom->offset >> 1;
11262         length = eeprom->length >> 1;
11263         cnt_words = length;
11264
11265         if (offset > hw->nvm.sr_size ||
11266                 offset + length > hw->nvm.sr_size) {
11267                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11268                 return -EINVAL;
11269         }
11270
11271         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11272
11273         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11274         if (ret_code != I40E_SUCCESS || cnt_words != length) {
11275                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
11276                 return -EIO;
11277         }
11278
11279         return 0;
11280 }
11281
11282 static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11283                                       struct ether_addr *mac_addr)
11284 {
11285         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11286         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11287         struct i40e_vsi *vsi = pf->main_vsi;
11288         struct i40e_mac_filter_info mac_filter;
11289         struct i40e_mac_filter *f;
11290         int ret;
11291
11292         if (!is_valid_assigned_ether_addr(mac_addr)) {
11293                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11294                 return;
11295         }
11296
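              /* Find the filter entry holding the current default MAC so
               * that it can be replaced.
               */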
11297         TAILQ_FOREACH(f, &vsi->mac_list, next) {
11298                 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
11299                         break;
11300         }
11301
11302         if (f == NULL) {
11303                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11304                 return;
11305         }
11306
11307         mac_filter = f->mac_info;
11308         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11309         if (ret != I40E_SUCCESS) {
11310                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11311                 return;
11312         }
11313         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11314         ret = i40e_vsi_add_mac(vsi, &mac_filter);
11315         if (ret != I40E_SUCCESS) {
11316                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
11317                 return;
11318         }
11319         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11320
11321         i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11322                                   mac_addr->addr_bytes, NULL);
11323 }
11324
11325 static int
11326 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11327 {
11328         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11329         struct rte_eth_dev_data *dev_data = pf->dev_data;
11330         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11331         int ret = 0;
11332
11333         /* check if mtu is within the allowed range */
11334         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
11335                 return -EINVAL;
11336
11337         /* MTU setting is forbidden while the port is started */
11338         if (dev_data->dev_started) {
11339                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11340                             dev_data->port_id);
11341                 return -EBUSY;
11342         }
11343
11344         if (frame_size > ETHER_MAX_LEN)
11345                 dev_data->dev_conf.rxmode.jumbo_frame = 1;
11346         else
11347                 dev_data->dev_conf.rxmode.jumbo_frame = 0;
11348
11349         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11350
11351         return ret;
11352 }
11353
11354 /* Restore ethertype filter */
11355 static void
11356 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11357 {
11358         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11359         struct i40e_ethertype_filter_list
11360                 *ethertype_list = &pf->ethertype.ethertype_list;
11361         struct i40e_ethertype_filter *f;
11362         struct i40e_control_filter_stats stats = {0}; /* zeroed in case the list is empty */
11363         uint16_t flags;
11364
11365         TAILQ_FOREACH(f, ethertype_list, rules) {
11366                 flags = 0;
11367                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11368                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11369                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11370                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11371                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11372
11373                 memset(&stats, 0, sizeof(stats));
11374                 i40e_aq_add_rem_control_packet_filter(hw,
11375                                             f->input.mac_addr.addr_bytes,
11376                                             f->input.ether_type,
11377                                             flags, pf->main_vsi->seid,
11378                                             f->queue, 1, &stats, NULL);
11379         }
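              /* Log the counters returned by the last AQ call; they show
               * the firmware's used/free filter counts after the replay.
               */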
11380         PMD_DRV_LOG(INFO, "Ethertype filter:"
11381                     " mac_etype_used = %u, etype_used = %u,"
11382                     " mac_etype_free = %u, etype_free = %u",
11383                     stats.mac_etype_used, stats.etype_used,
11384                     stats.mac_etype_free, stats.etype_free);
11385 }
11386
11387 /* Restore tunnel filter */
11388 static void
11389 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11390 {
11391         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11392         struct i40e_vsi *vsi;
11393         struct i40e_pf_vf *vf;
11394         struct i40e_tunnel_filter_list
11395                 *tunnel_list = &pf->tunnel.tunnel_list;
11396         struct i40e_tunnel_filter *f;
11397         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
11398         bool big_buffer = 0;
11399
11400         TAILQ_FOREACH(f, tunnel_list, rules) {
11401                 if (!f->is_to_vf)
11402                         vsi = pf->main_vsi;
11403                 else {
11404                         vf = &pf->vfs[f->vf_id];
11405                         vsi = vf->vsi;
11406                 }
11407                 memset(&cld_filter, 0, sizeof(cld_filter));
11408                 ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
11409                         (struct ether_addr *)&cld_filter.element.outer_mac);
11410                 ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
11411                         (struct ether_addr *)&cld_filter.element.inner_mac);
11412                 cld_filter.element.inner_vlan = f->input.inner_vlan;
11413                 cld_filter.element.flags = f->input.flags;
11414                 cld_filter.element.tenant_id = f->input.tenant_id;
11415                 cld_filter.element.queue_number = f->queue;
11416                 rte_memcpy(cld_filter.general_fields,
11417                            f->input.general_fields,
11418                            sizeof(f->input.general_fields));
11419
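                      /* Customized cloud filter types 0x10, 0x11 and 0x12
                       * carry extra general fields and must use the
                       * big-buffer AQ variant.
                       */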
11420                 if (((f->input.flags &
11421                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11422                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11423                     ((f->input.flags &
11424                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11425                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11426                     ((f->input.flags &
11427                      I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11428                      I40E_AQC_ADD_CLOUD_FILTER_0X10))
11429                         big_buffer = 1;
11430
11431                 if (big_buffer)
11432                         i40e_aq_add_cloud_filters_big_buffer(hw,
11433                                              vsi->seid, &cld_filter, 1);
11434                 else
11435                         i40e_aq_add_cloud_filters(hw, vsi->seid,
11436                                                   &cld_filter.element, 1);
11437         }
11438 }
11439
11440 /* Restore rss filter */
11441 static inline void
11442 i40e_rss_filter_restore(struct i40e_pf *pf)
11443 {
11444         struct i40e_rte_flow_rss_conf *conf = &pf->rss_info;
11445
11446         if (conf->num)
11447                 i40e_config_rss_filter(pf, conf, TRUE);
11448 }
11449
11450 static void
11451 i40e_filter_restore(struct i40e_pf *pf)
11452 {
11453         i40e_ethertype_filter_restore(pf);
11454         i40e_tunnel_filter_restore(pf);
11455         i40e_fdir_filter_restore(pf);
11456         i40e_rss_filter_restore(pf);
11457 }
11458
11459 static bool
11460 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11461 {
11462         if (strcmp(dev->device->driver->name, drv->driver.name))
11463                 return false;
11464
11465         return true;
11466 }
11467
11468 bool
11469 is_i40e_supported(struct rte_eth_dev *dev)
11470 {
11471         return is_device_supported(dev, &rte_i40e_pmd);
11472 }
11473
11474 struct i40e_customized_pctype*
11475 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11476 {
11477         int i;
11478
11479         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11480                 if (pf->customized_pctype[i].index == index)
11481                         return &pf->customized_pctype[i];
11482         }
11483         return NULL;
11484 }
11485
11486 static int
11487 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11488                               uint32_t pkg_size, uint32_t proto_num,
11489                               struct rte_pmd_i40e_proto_info *proto)
11490 {
11491         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11492         uint32_t pctype_num;
11493         struct rte_pmd_i40e_ptype_info *pctype;
11494         uint32_t buff_size;
11495         struct i40e_customized_pctype *new_pctype = NULL;
11496         uint8_t proto_id;
11497         uint8_t pctype_value;
11498         char name[64];
11499         uint32_t i, j, n;
11500         int ret;
11501
11502         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11503                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
11504                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11505         if (ret) {
11506                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
11507                 return -1;
11508         }
11509         if (!pctype_num) {
11510                 PMD_DRV_LOG(INFO, "No new pctype added");
11511                 return -1;
11512         }
11513
11514         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11515         pctype = rte_zmalloc("new_pctype", buff_size, 0);
11516         if (!pctype) {
11517                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11518                 return -1;
11519         }
11520         /* get information about new pctype list */
11521         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11522                                         (uint8_t *)pctype, buff_size,
11523                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11524         if (ret) {
11525                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
11526                 rte_free(pctype);
11527                 return -1;
11528         }
11529
11530         /* Update customized pctype. */
11531         for (i = 0; i < pctype_num; i++) {
11532                 pctype_value = pctype[i].ptype_id;
11533                 memset(name, 0, sizeof(name));
11534                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11535                         proto_id = pctype[i].protocols[j];
11536                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11537                                 continue;
11538                         for (n = 0; n < proto_num; n++) {
11539                                 if (proto[n].proto_id != proto_id)
11540                                         continue;
11541                                 strcat(name, proto[n].name);
11542                                 strcat(name, "_");
11543                                 break;
11544                         }
11545                 }
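                      /* Drop the trailing '_' appended after the last
                       * protocol name.
                       */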
11546                 name[strlen(name) - 1] = '\0';
11547                 if (!strcmp(name, "GTPC"))
11548                         new_pctype =
11549                                 i40e_find_customized_pctype(pf,
11550                                                       I40E_CUSTOMIZED_GTPC);
11551                 else if (!strcmp(name, "GTPU_IPV4"))
11552                         new_pctype =
11553                                 i40e_find_customized_pctype(pf,
11554                                                    I40E_CUSTOMIZED_GTPU_IPV4);
11555                 else if (!strcmp(name, "GTPU_IPV6"))
11556                         new_pctype =
11557                                 i40e_find_customized_pctype(pf,
11558                                                    I40E_CUSTOMIZED_GTPU_IPV6);
11559                 else if (!strcmp(name, "GTPU"))
11560                         new_pctype =
11561                                 i40e_find_customized_pctype(pf,
11562                                                       I40E_CUSTOMIZED_GTPU);
11563                 if (new_pctype) {
11564                         new_pctype->pctype = pctype_value;
11565                         new_pctype->valid = true;
11566                 }
11567         }
11568
11569         rte_free(pctype);
11570         return 0;
11571 }
11572
11573 static int
11574 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
11575                                uint32_t pkg_size, uint32_t proto_num,
11576                                struct rte_pmd_i40e_proto_info *proto)
11577 {
11578         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
11579         uint16_t port_id = dev->data->port_id;
11580         uint32_t ptype_num;
11581         struct rte_pmd_i40e_ptype_info *ptype;
11582         uint32_t buff_size;
11583         uint8_t proto_id;
11584         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
11585         uint32_t i, j, n;
11586         bool in_tunnel;
11587         int ret;
11588
11589         /* get information about new ptype num */
11590         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11591                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
11592                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
11593         if (ret) {
11594                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
11595                 return ret;
11596         }
11597         if (!ptype_num) {
11598                 PMD_DRV_LOG(INFO, "No new ptype added");
11599                 return -1;
11600         }
11601
11602         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11603         ptype = rte_zmalloc("new_ptype", buff_size, 0);
11604         if (!ptype) {
11605                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11606                 return -1;
11607         }
11608
11609         /* get information about new ptype list */
11610         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11611                                         (uint8_t *)ptype, buff_size,
11612                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
11613         if (ret) {
11614                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
11615                 rte_free(ptype);
11616                 return ret;
11617         }
11618
11619         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
11620         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
11621         if (!ptype_mapping) {
11622                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11623                 rte_free(ptype);
11624                 return -1;
11625         }
11626
11627         /* Update ptype mapping table. */
11628         for (i = 0; i < ptype_num; i++) {
11629                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
11630                 ptype_mapping[i].sw_ptype = 0;
11631                 in_tunnel = false;
11632                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11633                         proto_id = ptype[i].protocols[j];
11634                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11635                                 continue;
11636                         for (n = 0; n < proto_num; n++) {
11637                                 if (proto[n].proto_id != proto_id)
11638                                         continue;
11639                                 memset(name, 0, sizeof(name));
11640                                 strncpy(name, proto[n].name, sizeof(name) - 1);
11641                                 if (!strncasecmp(name, "PPPOE", 5))
11642                                         ptype_mapping[i].sw_ptype |=
11643                                                 RTE_PTYPE_L2_ETHER_PPPOE;
11644                                 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11645                                          !in_tunnel) {
11646                                         ptype_mapping[i].sw_ptype |=
11647                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11648                                         ptype_mapping[i].sw_ptype |=
11649                                                 RTE_PTYPE_L4_FRAG;
11650                                 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11651                                            in_tunnel) {
11652                                         ptype_mapping[i].sw_ptype |=
11653                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11654                                         ptype_mapping[i].sw_ptype |=
11655                                                 RTE_PTYPE_INNER_L4_FRAG;
11656                                 } else if (!strncasecmp(name, "OIPV4", 5)) {
11657                                         ptype_mapping[i].sw_ptype |=
11658                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11659                                         in_tunnel = true;
11660                                 } else if (!strncasecmp(name, "IPV4", 4) &&
11661                                            !in_tunnel)
11662                                         ptype_mapping[i].sw_ptype |=
11663                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11664                                 else if (!strncasecmp(name, "IPV4", 4) &&
11665                                          in_tunnel)
11666                                         ptype_mapping[i].sw_ptype |=
11667                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11668                                 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
11669                                          !in_tunnel) {
11670                                         ptype_mapping[i].sw_ptype |=
11671                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11672                                         ptype_mapping[i].sw_ptype |=
11673                                                 RTE_PTYPE_L4_FRAG;
11674                                 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
11675                                            in_tunnel) {
11676                                         ptype_mapping[i].sw_ptype |=
11677                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11678                                         ptype_mapping[i].sw_ptype |=
11679                                                 RTE_PTYPE_INNER_L4_FRAG;
11680                                 } else if (!strncasecmp(name, "OIPV6", 5)) {
11681                                         ptype_mapping[i].sw_ptype |=
11682                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11683                                         in_tunnel = true;
11684                                 } else if (!strncasecmp(name, "IPV6", 4) &&
11685                                            !in_tunnel)
11686                                         ptype_mapping[i].sw_ptype |=
11687                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11688                                 else if (!strncasecmp(name, "IPV6", 4) &&
11689                                          in_tunnel)
11690                                         ptype_mapping[i].sw_ptype |=
11691                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11692                                 else if (!strncasecmp(name, "UDP", 3) &&
11693                                          !in_tunnel)
11694                                         ptype_mapping[i].sw_ptype |=
11695                                                 RTE_PTYPE_L4_UDP;
11696                                 else if (!strncasecmp(name, "UDP", 3) &&
11697                                          in_tunnel)
11698                                         ptype_mapping[i].sw_ptype |=
11699                                                 RTE_PTYPE_INNER_L4_UDP;
11700                                 else if (!strncasecmp(name, "TCP", 3) &&
11701                                          !in_tunnel)
11702                                         ptype_mapping[i].sw_ptype |=
11703                                                 RTE_PTYPE_L4_TCP;
11704                                 else if (!strncasecmp(name, "TCP", 3) &&
11705                                          in_tunnel)
11706                                         ptype_mapping[i].sw_ptype |=
11707                                                 RTE_PTYPE_INNER_L4_TCP;
11708                                 else if (!strncasecmp(name, "SCTP", 4) &&
11709                                          !in_tunnel)
11710                                         ptype_mapping[i].sw_ptype |=
11711                                                 RTE_PTYPE_L4_SCTP;
11712                                 else if (!strncasecmp(name, "SCTP", 4) &&
11713                                          in_tunnel)
11714                                         ptype_mapping[i].sw_ptype |=
11715                                                 RTE_PTYPE_INNER_L4_SCTP;
11716                                 else if ((!strncasecmp(name, "ICMP", 4) ||
11717                                           !strncasecmp(name, "ICMPV6", 6)) &&
11718                                          !in_tunnel)
11719                                         ptype_mapping[i].sw_ptype |=
11720                                                 RTE_PTYPE_L4_ICMP;
11721                                 else if ((!strncasecmp(name, "ICMP", 4) ||
11722                                           !strncasecmp(name, "ICMPV6", 6)) &&
11723                                          in_tunnel)
11724                                         ptype_mapping[i].sw_ptype |=
11725                                                 RTE_PTYPE_INNER_L4_ICMP;
11726                                 else if (!strncasecmp(name, "GTPC", 4)) {
11727                                         ptype_mapping[i].sw_ptype |=
11728                                                 RTE_PTYPE_TUNNEL_GTPC;
11729                                         in_tunnel = true;
11730                                 } else if (!strncasecmp(name, "GTPU", 4)) {
11731                                         ptype_mapping[i].sw_ptype |=
11732                                                 RTE_PTYPE_TUNNEL_GTPU;
11733                                         in_tunnel = true;
11734                                 } else if (!strncasecmp(name, "GRENAT", 6)) {
11735                                         ptype_mapping[i].sw_ptype |=
11736                                                 RTE_PTYPE_TUNNEL_GRENAT;
11737                                         in_tunnel = true;
11738                                 } else if (!strncasecmp(name, "L2TPV2CTL", 9)) {
11739                                         ptype_mapping[i].sw_ptype |=
11740                                                 RTE_PTYPE_TUNNEL_L2TP;
11741                                         in_tunnel = true;
11742                                 }
11743
11744                                 break;
11745                         }
11746                 }
11747         }
11748
11749         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
11750                                                 ptype_num, 0);
11751         if (ret)
11752                 PMD_DRV_LOG(ERR, "Failed to update mapping table.");
11753
11754         rte_free(ptype_mapping);
11755         rte_free(ptype);
11756         return ret;
11757 }
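
/*
 * Worked example (protocol ordering assumed for illustration): a
 * profile ptype whose protocol list reads OIPV4, GTPU, IPV4, UDP is
 * translated by the name checks above as
 *   "OIPV4" -> RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, in_tunnel = true
 *   "GTPU"  -> RTE_PTYPE_TUNNEL_GTPU
 *   "IPV4"  -> RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN (tunnel path)
 *   "UDP"   -> RTE_PTYPE_INNER_L4_UDP (tunnel path)
 * so the software ptype reported for such packets is the bitwise OR
 * of those four flags.
 */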
11758
11759 void
11760 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
11761                               uint32_t pkg_size)
11762 {
11763         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11764         uint32_t proto_num;
11765         struct rte_pmd_i40e_proto_info *proto;
11766         uint32_t buff_size;
11767         uint32_t i;
11768         int ret;
11769
11770         /* get information about protocol number */
11771         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11772                                        (uint8_t *)&proto_num, sizeof(proto_num),
11773                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
11774         if (ret) {
11775                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
11776                 return;
11777         }
11778         if (!proto_num) {
11779                 PMD_DRV_LOG(INFO, "No new protocol added");
11780                 return;
11781         }
11782
11783         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
11784         proto = rte_zmalloc("new_proto", buff_size, 0);
11785         if (!proto) {
11786                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11787                 return;
11788         }
11789
11790         /* get information about protocol list */
11791         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11792                                         (uint8_t *)proto, buff_size,
11793                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
11794         if (ret) {
11795                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
11796                 rte_free(proto);
11797                 return;
11798         }
11799
11800         /* Check if GTP is supported. */
11801         for (i = 0; i < proto_num; i++) {
11802                 if (!strncmp(proto[i].name, "GTP", 3)) {
11803                         pf->gtp_support = true;
11804                         break;
11805                 }
11806         }
11807
11808         /* Update customized pctype info */
11809         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
11810                                             proto_num, proto);
11811         if (ret)
11812                 PMD_DRV_LOG(INFO, "No pctype is updated.");
11813
11814         /* Update customized ptype info */
11815         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
11816                                            proto_num, proto);
11817         if (ret)
11818                 PMD_DRV_LOG(INFO, "No ptype is updated.");
11819
11820         rte_free(proto);
11821 }
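
/*
 * Sketch of the application-side path that reaches this function.
 * load_profile() and the file name are hypothetical placeholders;
 * only rte_pmd_i40e_process_ddp_package() is a real API:
 *
 *	uint32_t size;
 *	uint8_t *buf = load_profile("gtp.pkgo", &size);
 *	int ret = rte_pmd_i40e_process_ddp_package(port_id, buf, size,
 *					RTE_PMD_I40E_PKG_OP_WR_ADD);
 *
 * On a successful write the PMD invokes i40e_update_customized_info()
 * so the profile's new pctypes and ptypes become usable.
 */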
11822
11823 /* Create a QinQ cloud filter
11824  *
11825  * The Fortville NIC has limited resources for tunnel filters,
11826  * so we can only reuse existing filters.
11827  *
11828  * In step 1 we define which Field Vector fields can be used for
11829  * filter types.
11830  * As we do not have the inner tag defined as a field,
11831  * we have to define it first, by reusing one of the L1 entries.
11832  *
11833  * In step 2 we replace one of the existing filter types with
11834  * a new one for QinQ.
11835  * As we are reusing L1 and replacing L2, some of the default filter
11836  * types will disappear, which depends on the L1 and L2 entries we reuse.
11837  *
11838  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
11839  *
11840  * 1.   Create L1 filter of outer vlan (12b) which will be in use
11841  *              later when we define the cloud filter.
11842  *      a.      Valid_flags.replace_cloud = 0
11843  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
11844  *      c.      New_filter = 0x10
11845  *      d.      TR bit = 0xff (optional, not used here)
11846  *      e.      Buffer – 2 entries:
11847  *              i.      Byte 0 = 8 (outer vlan FV index).
11848  *                      Byte 1 = 0 (rsv)
11849  *                      Byte 2-3 = 0x0fff
11850  *              ii.     Byte 0 = 37 (inner vlan FV index).
11851  *                      Byte 1 = 0 (rsv)
11852  *                      Byte 2-3 = 0x0fff
11853  *
11854  * Step 2:
11855  * 2.   Create cloud filter using two L1 filters entries: stag and
11856  *              new filter (outer vlan + inner vlan)
11857  *      a.      Valid_flags.replace_cloud = 1
11858  *      b.      Old_filter = 1 (instead of outer IP)
11859  *      c.      New_filter = 0x10
11860  *      d.      Buffer – 2 entries:
11861  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
11862  *                      Byte 1-3 = 0 (rsv)
11863  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
11864  *                      Byte 9-11 = 0 (rsv)
11865  */
11866 static int
11867 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
11868 {
11869         int ret = -ENOTSUP;
11870         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
11871         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
11872         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11873
11874         if (pf->support_multi_driver) {
11875                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
11876                 return ret;
11877         }
11878
11879         /* Init */
11880         memset(&filter_replace, 0,
11881                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
11882         memset(&filter_replace_buf, 0,
11883                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
11884
11885         /* Create the L1 filter */
11886         filter_replace.old_filter_type =
11887                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
11888         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
11889         filter_replace.tr_bit = 0;
11890
11891         /* Prepare the buffer, 2 entries */
11892         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
11893         filter_replace_buf.data[0] |=
11894                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
11895         /* Field Vector 12b mask */
11896         filter_replace_buf.data[2] = 0xff;
11897         filter_replace_buf.data[3] = 0x0f;
11898         filter_replace_buf.data[4] =
11899                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
11900         filter_replace_buf.data[4] |=
11901                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
11902         /* Field Vector 12b mask */
11903         filter_replace_buf.data[6] = 0xff;
11904         filter_replace_buf.data[7] = 0x0f;
11905         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
11906                         &filter_replace_buf);
11907         if (ret != I40E_SUCCESS)
11908                 return ret;
11909         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
11910                     "cloud l1 type is changed from 0x%x to 0x%x",
11911                     filter_replace.old_filter_type,
11912                     filter_replace.new_filter_type);
11913
11914         /* Apply the second replace command: create the L2 cloud filter */
11915         memset(&filter_replace, 0,
11916                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
11917         memset(&filter_replace_buf, 0,
11918                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
11919
11920         /* Create the L2 filter; its input is the L1 filter created above */
11921         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
11922         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
11923         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
11924
11925         /* Prepare the buffer, 2 entries */
11926         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
11927         filter_replace_buf.data[0] |=
11928                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
11929         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
11930         filter_replace_buf.data[4] |=
11931                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
11932         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
11933                         &filter_replace_buf);
11934         if (!ret) {
11935                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
11936                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
11937                             "cloud filter type is changed from 0x%x to 0x%x",
11938                             filter_replace.old_filter_type,
11939                             filter_replace.new_filter_type);
11940         }
11941         return ret;
11942 }
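
/*
 * Minimal sketch of an rte_flow pattern that exercises the replaced
 * QinQ cloud filter (illustrative; the spec/mask values an application
 * would set are omitted):
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },	// outer VLAN
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },	// inner VLAN
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * The flow parser maps the two VLAN items to an I40E_TUNNEL_TYPE_QINQ
 * tunnel filter, and this function is called once to repurpose filter
 * type 0x10 for outer + inner VLAN matching.
 */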
11943
11944 int
11945 i40e_config_rss_filter(struct i40e_pf *pf,
11946                 struct i40e_rte_flow_rss_conf *conf, bool add)
11947 {
11948         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11949         uint32_t i, lut = 0;
11950         uint16_t j, num;
11951         struct rte_eth_rss_conf rss_conf = conf->rss_conf;
11952         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
11953
11954         if (!add) {
11955                 if (memcmp(conf, rss_info,
11956                         sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
11957                         i40e_pf_disable_rss(pf);
11958                         memset(rss_info, 0,
11959                                 sizeof(struct i40e_rte_flow_rss_conf));
11960                         return 0;
11961                 }
11962                 return -EINVAL;
11963         }
11964
11965         if (rss_info->num)
11966                 return -EINVAL;
11967
11968         /* If both VMDQ and RSS are enabled, not all of the PF queues are
11969          * configured; calculate the actual number of PF queues in use.
11970          */
11971         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
11972                 num = i40e_pf_calc_configured_queues_num(pf);
11973         else
11974                 num = pf->dev_data->nb_rx_queues;
11975
11976         num = RTE_MIN(num, conf->num);
11977         PMD_DRV_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
11978                         num);
11979
11980         if (num == 0) {
11981                 PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
11982                 return -ENOTSUP;
11983         }
11984
11985         /* Fill in redirection table */
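        /* Each PFQF_HLUT register holds four 8-bit LUT entries, so queue
         * indices are accumulated into 'lut' and flushed to hardware on
         * every fourth iteration ((i & 3) == 3).
         */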
11986         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
11987                 if (j == num)
11988                         j = 0;
11989                 lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
11990                         hw->func_caps.rss_table_entry_width) - 1));
11991                 if ((i & 3) == 3)
11992                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
11993         }
11994
11995         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
11996                 i40e_pf_disable_rss(pf);
11997                 return 0;
11998         }
11999         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
12000                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
12001                 /* Random default keys */
12002                 static uint32_t rss_key_default[] = {0x6b793944,
12003                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
12004                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
12005                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
12006
12007                 rss_conf.rss_key = (uint8_t *)rss_key_default;
12008                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
12009                                                         sizeof(uint32_t);
12010         }
12011
12012         i40e_hw_rss_hash_set(pf, &rss_conf);
12013
12014         rte_memcpy(rss_info,
12015                 conf, sizeof(struct i40e_rte_flow_rss_conf));
12016
12017         return 0;
12018 }
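
/*
 * Minimal usage sketch (illustrative; only the fields this function
 * reads are shown, and the queue/hash choices are arbitrary):
 *
 *	struct i40e_rte_flow_rss_conf conf = { 0 };
 *
 *	conf.num = 4;				// spread across 4 queues
 *	conf.queue[0] = 0;
 *	conf.queue[1] = 1;
 *	conf.queue[2] = 2;
 *	conf.queue[3] = 3;
 *	conf.rss_conf.rss_hf = ETH_RSS_IP;	// hash on IP fields
 *	int ret = i40e_config_rss_filter(pf, &conf, true);
 *
 * Calling it again with add == false and an identical conf disables
 * RSS and clears the stored pf->rss_info.
 */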
12019
12020 RTE_INIT(i40e_init_log);
12021 static void
12022 i40e_init_log(void)
12023 {
12024         i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
12025         if (i40e_logtype_init >= 0)
12026                 rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
12027         i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
12028         if (i40e_logtype_driver >= 0)
12029                 rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
12030 }
12031
12032 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12033                               QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12034                               ETH_I40E_SUPPORT_MULTI_DRIVER "=1");