net/i40e: improve packet type parser
drivers/net/i40e/i40e_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"

#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
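/*
 * Note: 0xF2000 bytes >> I40E_KILOSHIFT is 968 KB, so both watermark
 * defaults span the entire 968 KB Rx packet buffer (I40E_RXPBSIZE above).
 */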

/* Receive average packet size in bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))
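/*
 * A sketch of how a mask like this is consumed when validating a
 * flow/hash configuration (illustrative only, not a function from
 * this file): reject any flow type whose bit is not set.
 *
 *	if (!(I40E_FLOW_TYPES & (1UL << flow_type)))
 *		return -EINVAL;
 */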

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
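/*
 * The INCVAL constants are per-link-speed increments for the PRTTSYN
 * cycle counter. A sketch of how one is picked when timesync starts
 * (faster links tick more often, so they get a smaller increment):
 *
 *	switch (link.link_speed) {
 *	case ETH_SPEED_NUM_40G:
 *		incval = I40E_PTP_40GB_INCVAL;
 *		break;
 *	case ETH_SPEED_NUM_10G:
 *		incval = I40E_PTP_10GB_INCVAL;
 *		break;
 *	default:
 *		incval = I40E_PTP_1GB_INCVAL;
 *	}
 */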

/**
 * Below are values for writing un-exposed registers suggested
 * by silicon experts
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
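/*
 * Note: I40E_REG_INSET_FLEX_PAYLOAD_WORDS (0x3FC0) is simply the OR of
 * the eight per-word masks above (0x2000 | 0x1000 | ... | 0x0040).
 */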
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPV4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
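/*
 * A sketch of the read-modify-write these offsets support, using the
 * generic PCI config-space accessors (error handling omitted; this is
 * illustrative, not a function from this file): check the capability
 * bit, then set the enable bit in the control register.
 *
 *	uint32_t buf;
 *
 *	rte_pci_read_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CAP_REG);
 *	if (buf & PCI_DEV_CAP_EXT_TAG_MASK) {
 *		rte_pci_read_config(pci_dev, &buf, sizeof(buf),
 *				    PCI_DEV_CTRL_REG);
 *		buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
 *		rte_pci_write_config(pci_dev, &buf, sizeof(buf),
 *				     PCI_DEV_CTRL_REG);
 *	}
 */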

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static int  i40e_dev_reset(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned limit);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
					    uint16_t queue_id,
					    uint8_t stat_idx,
					    uint8_t is_rx);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
				char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id,
				int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type,
			      uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue,
				      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr,
			    uint32_t index,
			    uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
			       uint32_t hireg,
			       uint32_t loreg,
			       bool offset_loaded,
			       uint64_t *offset,
			       uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
				uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
						struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
				enum rte_filter_type filter_type,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
				  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
						     uint16_t seid,
						     uint16_t rule_type,
						     uint16_t *entries,
						     uint16_t count,
						     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp,
					   uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
				   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
				    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					  uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
			 struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *eeprom);

static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
				      struct ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
	const struct rte_eth_ethertype_filter *input,
	struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
	struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
	struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
				struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

int i40e_logtype_init;
int i40e_logtype_driver;

static const struct rte_pci_id pci_id_i40e_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.dev_reset                    = i40e_dev_reset,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.xstats_get                   = i40e_dev_xstats_get,
	.xstats_get_names             = i40e_dev_xstats_get_names,
	.stats_reset                  = i40e_dev_stats_reset,
	.xstats_reset                 = i40e_dev_stats_reset,
	.queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
	.fw_version_get               = i40e_fw_version_get,
	.dev_infos_get                = i40e_dev_info_get,
	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.rx_queue_count               = i40e_dev_rx_queue_count,
	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
	.rx_descriptor_status         = i40e_dev_rx_descriptor_status,
	.tx_descriptor_status         = i40e_dev_tx_descriptor_status,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_get                = i40e_flow_ctrl_get,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
	.filter_ctrl                  = i40e_dev_filter_ctrl,
	.rxq_info_get                 = i40e_rxq_info_get,
	.txq_info_get                 = i40e_txq_info_get,
	.mirror_rule_set              = i40e_mirror_rule_set,
	.mirror_rule_reset            = i40e_mirror_rule_reset,
	.timesync_enable              = i40e_timesync_enable,
	.timesync_disable             = i40e_timesync_disable,
	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
	.get_dcb_info                 = i40e_dev_get_dcb_info,
	.timesync_adjust_time         = i40e_timesync_adjust_time,
	.timesync_read_time           = i40e_timesync_read_time,
	.timesync_write_time          = i40e_timesync_write_time,
	.get_reg                      = i40e_get_regs,
	.get_eeprom_length            = i40e_get_eeprom_length,
	.get_eeprom                   = i40e_get_eeprom,
	.mac_addr_set                 = i40e_set_default_mac_addr,
	.mtu_set                      = i40e_dev_mtu_set,
	.tm_ops_get                   = i40e_tm_ops_get,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
		sizeof(rte_i40e_stats_strings[0]))
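/*
 * A sketch of how a name/offset table like this is consumed when
 * filling xstats (illustrative only): each value is read by adding
 * the recorded offset to the base address of the stats structure.
 *
 *	for (i = 0; i < I40E_NB_ETH_XSTATS; i++)
 *		xstats[count++].value = *(uint64_t *)((char *)eth_stats +
 *			rte_i40e_stats_strings[i].offset);
 */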

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
		mac_remote_faults)},
	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
		rx_length_errors)},
	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
		mac_short_packet_dropped)},
	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_big)},
	{"rx_flow_director_atr_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
	{"rx_flow_director_sb_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		tx_lpi_status)},
	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		rx_lpi_status)},
	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		tx_lpi_count)},
	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
		sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_rx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
		sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_tx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_tx)},
	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
		sizeof(rte_i40e_txq_prio_strings[0]))

static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct i40e_adapter), eth_i40e_dev_init);
}

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
	.id_table = pci_id_i40e_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_i40e_pci_probe,
	.remove = eth_i40e_pci_remove,
};

static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				      struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
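/*
 * The two helpers above move the 8-byte rte_eth_link as a single
 * 64-bit compare-and-set: the swap only happens if the destination
 * still holds the value sampled just before, so a racing update makes
 * the call return -1 instead of producing a torn struct. A caller
 * that insists on a consistent snapshot can retry (illustrative
 * sketch, not a function from this file):
 *
 *	struct rte_eth_link link;
 *
 *	while (rte_i40e_dev_atomic_read_link_status(dev, &link) != 0)
 *		;
 */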

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
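/*
 * The kmod dependency string above means a port must be bound to
 * igb_uio, uio_pci_generic or vfio-pci before this PMD can drive it,
 * e.g. (illustrative shell command; adjust the PCI address):
 *
 *	usertools/dpdk-devbind.py --bind=vfio-pci 0000:84:00.0
 */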

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
	 * Force global configuration for flexible payload
	 * to the first 16 bytes of the corresponding L2/L3/L4 payload.
	 * This should be removed from the code once a proper
	 * configuration API is added to avoid configuration conflicts
	 * between ports of the same device.
	 */
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);

	/*
	 * Initialize registers for parsing the packet type of QinQ.
	 * This should be removed from the code once a proper
	 * configuration API is added to avoid configuration conflicts
	 * between ports of the same device.
	 */
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
	int ret;

	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
				I40E_FLOW_CONTROL_ETHERTYPE, flags,
				pf->main_vsi_seid, 0,
				TRUE, NULL, NULL);
	if (ret)
		PMD_INIT_LOG(ERR,
			"Failed to add filter to drop flow control frames from VSIs.");
}

static int
floating_veb_list_handler(__rte_unused const char *key,
			  const char *floating_veb_value,
			  void *opaque)
{
	int idx = 0;
	unsigned int count = 0;
	char *end = NULL;
	int min, max;
	bool *vf_floating_veb = opaque;

	while (isblank(*floating_veb_value))
		floating_veb_value++;

	/* Reset floating VEB configuration for VFs */
	for (idx = 0; idx < I40E_MAX_VF; idx++)
		vf_floating_veb[idx] = false;

	min = I40E_MAX_VF;
	do {
		while (isblank(*floating_veb_value))
			floating_veb_value++;
		if (*floating_veb_value == '\0')
			return -1;
		errno = 0;
		idx = strtoul(floating_veb_value, &end, 10);
		if (errno || end == NULL)
			return -1;
		while (isblank(*end))
			end++;
		if (*end == '-') {
			min = idx;
		} else if ((*end == ';') || (*end == '\0')) {
			max = idx;
			if (min == I40E_MAX_VF)
				min = idx;
			if (max >= I40E_MAX_VF)
				max = I40E_MAX_VF - 1;
			for (idx = min; idx <= max; idx++) {
				vf_floating_veb[idx] = true;
				count++;
			}
			min = I40E_MAX_VF;
		} else {
			return -1;
		}
		floating_veb_value = end + 1;
	} while (*end != '\0');

	if (count == 0)
		return -1;

	return 0;
}
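/*
 * The syntax accepted by the handler above is semicolon-separated VF
 * indexes and ranges. An illustrative devargs string (the PCI address
 * is only an example):
 *
 *	-w 84:00.0,enable_floating_veb=1,floating_veb_list=1;3-4
 *
 * marks VFs 1, 3 and 4 as attached to the floating VEB.
 */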

static void
config_vf_floating_veb(struct rte_devargs *devargs,
		       uint16_t floating_veb,
		       bool *vf_floating_veb)
{
	struct rte_kvargs *kvlist;
	int i;
	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

	if (!floating_veb)
		return;
	/* All the VFs attach to the floating VEB by default
	 * when the floating VEB is enabled.
	 */
	for (i = 0; i < I40E_MAX_VF; i++)
		vf_floating_veb[i] = true;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
		rte_kvargs_free(kvlist);
		return;
	}
	/* When the floating_veb_list parameter exists, all the VFs
	 * first attach to the legacy VEB, and are then moved to the
	 * floating VEB according to the floating_veb_list.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_list,
			       floating_veb_list_handler,
			       vf_floating_veb) < 0) {
		rte_kvargs_free(kvlist);
		return;
	}
	rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
			    const char *value,
			    __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Floating VEB is enabled when there's key-value:
	 * enable_floating_veb=1
	 */
	if (rte_kvargs_process(kvlist, floating_veb_key,
			       i40e_check_floating_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
		pf->floating_veb =
			is_floating_veb_supported(pci_dev->device.devargs);
		config_vf_floating_veb(pci_dev->device.devargs,
				       pf->floating_veb,
				       pf->floating_veb_list);
	} else {
		pf->floating_veb = false;
	}
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	char ethertype_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters ethertype_hash_params = {
		.name = ethertype_hash_name,
		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
		.key_len = sizeof(struct i40e_ethertype_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize ethertype filter rule list and hash */
	TAILQ_INIT(&ethertype_rule->ethertype_list);
	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
		 "ethertype_%s", dev->device->name);
	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
	if (!ethertype_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
		return -EINVAL;
	}
	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
				       sizeof(struct i40e_ethertype_filter *) *
				       I40E_MAX_ETHERTYPE_FILTER_NUM,
				       0);
	if (!ethertype_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for ethertype hash map!");
		ret = -ENOMEM;
		goto err_ethertype_hash_map_alloc;
	}

	return 0;

err_ethertype_hash_map_alloc:
	rte_hash_free(ethertype_rule->hash_table);

	return ret;
}
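/*
 * A sketch of how the hash table and hash_map pair set up above are
 * used when a rule is added (mirroring the intent of
 * i40e_sw_ethertype_filter_insert, declared earlier): the slot index
 * returned by rte_hash_add_key() indexes hash_map, so a later key
 * lookup translates straight into the filter pointer.
 *
 *	ret = rte_hash_add_key(ethertype_rule->hash_table,
 *			       &filter->input);
 *	if (ret >= 0)
 *		ethertype_rule->hash_map[ret] = filter;
 */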

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	char tunnel_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters tunnel_hash_params = {
		.name = tunnel_hash_name,
		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
		.key_len = sizeof(struct i40e_tunnel_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize tunnel filter rule list and hash */
	TAILQ_INIT(&tunnel_rule->tunnel_list);
	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
		 "tunnel_%s", dev->device->name);
	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
	if (!tunnel_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
		return -EINVAL;
	}
	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
				    sizeof(struct i40e_tunnel_filter *) *
				    I40E_MAX_TUNNEL_FILTER_NUM,
				    0);
	if (!tunnel_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for tunnel hash map!");
		ret = -ENOMEM;
		goto err_tunnel_hash_map_alloc;
	}

	return 0;

err_tunnel_hash_map_alloc:
	rte_hash_free(tunnel_rule->hash_table);

	return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = I40E_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct rte_eth_fdir_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize flow director filter rule list and hash */
	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
					  sizeof(struct i40e_fdir_filter *) *
					  I40E_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
i40e_init_customized_info(struct i40e_pf *pf)
{
	int i;

	/* Initialize customized pctype */
	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
		pf->customized_pctype[i].index = i;
		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
		pf->customized_pctype[i].valid = false;
	}

	pf->gtp_support = false;
}

void
i40e_init_queue_region_conf(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_queue_regions *info = &pf->queue_region;
	uint16_t i;

	for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);

	memset(info, 0, sizeof(struct i40e_queue_regions));
}

static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi;
	int ret;
	uint32_t len;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &i40e_eth_dev_ops;
	dev->rx_pkt_burst = i40e_recv_pkts;
	dev->tx_pkt_burst = i40e_xmit_pkts;
	dev->tx_pkt_prepare = i40e_prep_pkts;

	/* For secondary processes, we don't initialise any further as the
	 * primary has already done this work. Only check whether a different
	 * Rx function is needed. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		i40e_set_rx_function(dev);
		i40e_set_tx_function(dev);
		return 0;
	}
	i40e_set_default_ptype_table(dev);
	i40e_set_default_pctype_table(dev);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	intr_handle = &pci_dev->intr_handle;

	rte_eth_copy_pci_info(dev, pci_dev);

	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;

	hw->back = I40E_PF_TO_ADAPTER(pf);
	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
	if (!hw->hw_addr) {
		PMD_INIT_LOG(ERR,
			"Hardware is not available, as address is NULL");
		return -ENODEV;
	}

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->adapter_stopped = 0;

	/* Make sure all is clean before doing PF reset */
	i40e_clear_hw(hw);

	/* Initialize the hardware */
	i40e_hw_init(dev);

	/* Reset here to make sure all is clean for each PF */
	ret = i40e_pf_reset(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
		return ret;
	}

	/* Initialize the shared code (base driver) */
	ret = i40e_init_shared_code(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
		return ret;
	}

	/*
	 * To work around the NVM issue, initialize registers
	 * for flexible payload and packet type of QinQ by
	 * software. It should be removed once issues are fixed
	 * in NVM.
	 */
	i40e_GLQF_reg_init(hw);

	/* Initialize the input set for filters (hash and fd) to default value */
	i40e_filter_input_set_init(pf);

	/* Initialize the parameters for adminq */
	i40e_init_adminq_parameter(hw);
	ret = i40e_init_adminq(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
		return -EIO;
	}
	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
		     ((hw->nvm.version >> 12) & 0xf),
		     ((hw->nvm.version >> 4) & 0xff),
		     (hw->nvm.version & 0xf), hw->nvm.eetrack);

	/* initialise the L3_MAP register */
	ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
				   0x00000028,  NULL);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret);

	/* Need the special FW version to support floating VEB */
	config_floating_veb(dev);
	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);
	i40e_dev_sync_phy_type(hw);

	/*
	 * On X710, performance numbers fall far short of expectations on
	 * recent firmware versions, and the fix may not land in the next
	 * firmware release either, so a workaround in the software driver
	 * is needed: modify the initial values of three internal-only
	 * registers. The workaround can be removed once it is fixed in
	 * firmware.
	 */
1161         i40e_configure_registers(hw);
1162
1163         /* Get hw capabilities */
1164         ret = i40e_get_cap(hw);
1165         if (ret != I40E_SUCCESS) {
1166                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1167                 goto err_get_capabilities;
1168         }
1169
1170         /* Initialize parameters for PF */
1171         ret = i40e_pf_parameter_init(dev);
1172         if (ret != 0) {
1173                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1174                 goto err_parameter_init;
1175         }
1176
1177         /* Initialize the queue management */
1178         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1179         if (ret < 0) {
1180                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1181                 goto err_qp_pool_init;
1182         }
1183         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1184                                 hw->func_caps.num_msix_vectors - 1);
1185         if (ret < 0) {
1186                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1187                 goto err_msix_pool_init;
1188         }
1189
1190         /* Initialize lan hmc */
1191         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1192                                 hw->func_caps.num_rx_qp, 0, 0);
1193         if (ret != I40E_SUCCESS) {
1194                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1195                 goto err_init_lan_hmc;
1196         }
1197
1198         /* Configure lan hmc */
1199         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1200         if (ret != I40E_SUCCESS) {
1201                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1202                 goto err_configure_lan_hmc;
1203         }
1204
1205         /* Get and check the mac address */
1206         i40e_get_mac_addr(hw, hw->mac.addr);
1207         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1208                 PMD_INIT_LOG(ERR, "mac address is not valid");
1209                 ret = -EIO;
1210                 goto err_get_mac_addr;
1211         }
1212         /* Copy the permanent MAC address */
1213         ether_addr_copy((struct ether_addr *) hw->mac.addr,
1214                         (struct ether_addr *) hw->mac.perm_addr);
1215
1216         /* Disable flow control */
1217         hw->fc.requested_mode = I40E_FC_NONE;
1218         i40e_set_fc(hw, &aq_fail, TRUE);
1219
1220         /* Set the global registers with default ether type value */
1221         ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
1222         if (ret != I40E_SUCCESS) {
1223                 PMD_INIT_LOG(ERR,
1224                         "Failed to set the default outer VLAN ether type");
1225                 goto err_setup_pf_switch;
1226         }
1227
1228         /* PF setup, which includes VSI setup */
1229         ret = i40e_pf_setup(pf);
1230         if (ret) {
1231                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1232                 goto err_setup_pf_switch;
1233         }
1234
1235         /* reset all stats of the device, including pf and main vsi */
1236         i40e_dev_stats_reset(dev);
1237
1238         vsi = pf->main_vsi;
1239
1240         /* Disable double vlan by default */
1241         i40e_vsi_config_double_vlan(vsi, FALSE);
1242
1243         /* Disable S-TAG identification when floating_veb is disabled */
1244         if (!pf->floating_veb) {
1245                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1246                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1247                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1248                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1249                 }
1250         }
1251
1252         if (!vsi->max_macaddrs)
1253                 len = ETHER_ADDR_LEN;
1254         else
1255                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1256
1257         /* Should be done after the VSI is initialized */
1258         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1259         if (!dev->data->mac_addrs) {
1260                 PMD_INIT_LOG(ERR,
1261                         "Failed to allocate memory for storing MAC address");
1262                 goto err_mac_alloc;
1263         }
1264         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1265                                         &dev->data->mac_addrs[0]);
1266
1267         /* Init dcb to sw mode by default */
1268         ret = i40e_dcb_init_configure(dev, TRUE);
1269         if (ret != I40E_SUCCESS) {
1270                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1271                 pf->flags &= ~I40E_FLAG_DCB;
1272         }
1273         /* Update HW struct after DCB configuration */
1274         i40e_get_cap(hw);
1275
1276         /* initialize pf host driver to setup SRIOV resource if applicable */
1277         i40e_pf_host_init(dev);
1278
1279         /* register callback func to eal lib */
1280         rte_intr_callback_register(intr_handle,
1281                                    i40e_dev_interrupt_handler, dev);
1282
1283         /* configure and enable device interrupt */
1284         i40e_pf_config_irq0(hw, TRUE);
1285         i40e_pf_enable_irq0(hw);
1286
1287         /* enable uio intr after callback register */
1288         rte_intr_enable(intr_handle);
1289         /*
1290          * Add an ethertype filter to drop all flow control frames transmitted
1291          * from VSIs. By doing so, we stop VFs from sending out PAUSE or
1292          * PFC frames to the wire.
1293          */
1294         i40e_add_tx_flow_control_drop_filter(pf);
1295
1296         /* Set the max frame size to 0x2600 by default,
1297          * in case other drivers changed the default value.
1298          */
1299         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1300
1301         /* initialize mirror rule list */
1302         TAILQ_INIT(&pf->mirror_list);
1303
1304         /* initialize Traffic Manager configuration */
1305         i40e_tm_conf_init(dev);
1306
1307         /* Initialize customized information */
1308         i40e_init_customized_info(pf);
1309
1310         ret = i40e_init_ethtype_filter_list(dev);
1311         if (ret < 0)
1312                 goto err_init_ethtype_filter_list;
1313         ret = i40e_init_tunnel_filter_list(dev);
1314         if (ret < 0)
1315                 goto err_init_tunnel_filter_list;
1316         ret = i40e_init_fdir_filter_list(dev);
1317         if (ret < 0)
1318                 goto err_init_fdir_filter_list;
1319
1320         /* initialize queue region configuration */
1321         i40e_init_queue_region_conf(dev);
1322
1323         return 0;
1324
1325 err_init_fdir_filter_list:
1326         rte_free(pf->tunnel.hash_table);
1327         rte_free(pf->tunnel.hash_map);
1328 err_init_tunnel_filter_list:
1329         rte_free(pf->ethertype.hash_table);
1330         rte_free(pf->ethertype.hash_map);
1331 err_init_ethtype_filter_list:
1332         rte_free(dev->data->mac_addrs);
1333 err_mac_alloc:
1334         i40e_vsi_release(pf->main_vsi);
1335 err_setup_pf_switch:
1336 err_get_mac_addr:
1337 err_configure_lan_hmc:
1338         (void)i40e_shutdown_lan_hmc(hw);
1339 err_init_lan_hmc:
1340         i40e_res_pool_destroy(&pf->msix_pool);
1341 err_msix_pool_init:
1342         i40e_res_pool_destroy(&pf->qp_pool);
1343 err_qp_pool_init:
1344 err_parameter_init:
1345 err_get_capabilities:
1346         (void)i40e_shutdown_adminq(hw);
1347
1348         return ret;
1349 }
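
/*
 * A minimal sketch of the goto-unwind idiom used by the error path above:
 * each label releases only what was successfully set up before the failure,
 * in reverse order. The sketch_* helpers below are hypothetical stand-ins,
 * not part of this driver.
 */
static __rte_unused int sketch_init_a(void) { return 0; }
static __rte_unused void sketch_undo_a(void) { }
static __rte_unused int sketch_init_b(void) { return -EIO; }

static __rte_unused int
sketch_init_all(void)
{
        int ret;

        ret = sketch_init_a();
        if (ret < 0)
                goto err_a;
        ret = sketch_init_b();
        if (ret < 0)
                goto err_b; /* b failed: undo a, then report the error */

        return 0;

err_b:
        sketch_undo_a();
err_a:
        return ret;
}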
1350
1351 static void
1352 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1353 {
1354         struct i40e_ethertype_filter *p_ethertype;
1355         struct i40e_ethertype_rule *ethertype_rule;
1356
1357         ethertype_rule = &pf->ethertype;
1358         /* Remove all ethertype filter rules and hash */
1359         if (ethertype_rule->hash_map)
1360                 rte_free(ethertype_rule->hash_map);
1361         if (ethertype_rule->hash_table)
1362                 rte_hash_free(ethertype_rule->hash_table);
1363
1364         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1365                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1366                              p_ethertype, rules);
1367                 rte_free(p_ethertype);
1368         }
1369 }
1370
1371 static void
1372 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1373 {
1374         struct i40e_tunnel_filter *p_tunnel;
1375         struct i40e_tunnel_rule *tunnel_rule;
1376
1377         tunnel_rule = &pf->tunnel;
1378         /* Remove all tunnel filter rules and hash */
1379         if (tunnel_rule->hash_map)
1380                 rte_free(tunnel_rule->hash_map);
1381         if (tunnel_rule->hash_table)
1382                 rte_hash_free(tunnel_rule->hash_table);
1383
1384         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1385                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1386                 rte_free(p_tunnel);
1387         }
1388 }
1389
1390 static void
1391 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1392 {
1393         struct i40e_fdir_filter *p_fdir;
1394         struct i40e_fdir_info *fdir_info;
1395
1396         fdir_info = &pf->fdir;
1397         /* Remove all flow director rules and hash */
1398         if (fdir_info->hash_map)
1399                 rte_free(fdir_info->hash_map);
1400         if (fdir_info->hash_table)
1401                 rte_hash_free(fdir_info->hash_table);
1402
1403         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1404                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1405                 rte_free(p_fdir);
1406         }
1407 }
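
/*
 * The three removal helpers above share one idiom: free the rule hash map
 * and hash table, then pop-and-free every node still on the list. A generic
 * sketch with a hypothetical node type (illustration only):
 */
struct sketch_node {
        TAILQ_ENTRY(sketch_node) next;
};
TAILQ_HEAD(sketch_head, sketch_node);

static __rte_unused void
sketch_drain_list(struct sketch_head *head)
{
        struct sketch_node *node;

        /* TAILQ_FIRST() yields NULL once the list is empty */
        while ((node = TAILQ_FIRST(head))) {
                TAILQ_REMOVE(head, node, next);
                rte_free(node);
        }
}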
1408
1409 static int
1410 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1411 {
1412         struct i40e_pf *pf;
1413         struct rte_pci_device *pci_dev;
1414         struct rte_intr_handle *intr_handle;
1415         struct i40e_hw *hw;
1416         struct i40e_filter_control_settings settings;
1417         struct rte_flow *p_flow;
1418         int ret;
1419         uint8_t aq_fail = 0;
1420
1421         PMD_INIT_FUNC_TRACE();
1422
1423         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1424                 return 0;
1425
1426         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1427         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1428         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1429         intr_handle = &pci_dev->intr_handle;
1430
1431         if (hw->adapter_stopped == 0)
1432                 i40e_dev_close(dev);
1433
1434         dev->dev_ops = NULL;
1435         dev->rx_pkt_burst = NULL;
1436         dev->tx_pkt_burst = NULL;
1437
1438         /* Clear PXE mode */
1439         i40e_clear_pxe_mode(hw);
1440
1441         /* Unconfigure filter control */
1442         memset(&settings, 0, sizeof(settings));
1443         ret = i40e_set_filter_control(hw, &settings);
1444         if (ret)
1445                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1446                                         ret);
1447
1448         /* Disable flow control */
1449         hw->fc.requested_mode = I40E_FC_NONE;
1450         i40e_set_fc(hw, &aq_fail, TRUE);
1451
1452         /* uninitialize pf host driver */
1453         i40e_pf_host_uninit(dev);
1454
1455         rte_free(dev->data->mac_addrs);
1456         dev->data->mac_addrs = NULL;
1457
1458         /* disable uio intr before callback unregister */
1459         rte_intr_disable(intr_handle);
1460
1461         /* unregister callback func from eal lib */
1462         rte_intr_callback_unregister(intr_handle,
1463                                      i40e_dev_interrupt_handler, dev);
1464
1465         i40e_rm_ethtype_filter_list(pf);
1466         i40e_rm_tunnel_filter_list(pf);
1467         i40e_rm_fdir_filter_list(pf);
1468
1469         /* Remove all flows */
1470         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
1471                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
1472                 rte_free(p_flow);
1473         }
1474
1475         /* Remove all Traffic Manager configuration */
1476         i40e_tm_conf_uninit(dev);
1477
1478         return 0;
1479 }
1480
1481 static int
1482 i40e_dev_configure(struct rte_eth_dev *dev)
1483 {
1484         struct i40e_adapter *ad =
1485                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1486         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1487         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1488         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1489         int i, ret;
1490
1491         ret = i40e_dev_sync_phy_type(hw);
1492         if (ret)
1493                 return ret;
1494
1495         /* Initialize to TRUE. If any of the Rx queues doesn't meet the
1496          * bulk allocation or vector Rx preconditions, we will reset it.
1497          */
1498         ad->rx_bulk_alloc_allowed = true;
1499         ad->rx_vec_allowed = true;
1500         ad->tx_simple_allowed = true;
1501         ad->tx_vec_allowed = true;
1502
1503         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1504                 ret = i40e_fdir_setup(pf);
1505                 if (ret != I40E_SUCCESS) {
1506                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1507                         return -ENOTSUP;
1508                 }
1509                 ret = i40e_fdir_configure(dev);
1510                 if (ret < 0) {
1511                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1512                         goto err;
1513                 }
1514         } else
1515                 i40e_fdir_teardown(pf);
1516
1517         ret = i40e_dev_init_vlan(dev);
1518         if (ret < 0)
1519                 goto err;
1520
1521         /* VMDQ setup.
1522          *  VMDQ setting is kept out of i40e_pf_config_mq_rx() because
1523          *  VMDQ and RSS settings have different requirements.
1524          *  The general PMD call sequence is NIC init, configure,
1525          *  rx/tx_queue_setup and dev_start (sketched after this
1526          *  function). rx/tx_queue_setup() looks up the VSI a queue
1527          *  belongs to when VMDQ is in use, so VMDQ setting must happen
1528          *  before rx/tx_queue_setup(); this function is a good place.
1529          *  RSS setting needs the actual number of configured RX queues,
1530          *  only available after rx_queue_setup(), so dev_start() is a
1531          *  good place for RSS setup.
1532          */
1533         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1534                 ret = i40e_vmdq_setup(dev);
1535                 if (ret)
1536                         goto err;
1537         }
1538
1539         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1540                 ret = i40e_dcb_setup(dev);
1541                 if (ret) {
1542                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1543                         goto err_dcb;
1544                 }
1545         }
1546
1547         TAILQ_INIT(&pf->flow_list);
1548
1549         return 0;
1550
1551 err_dcb:
1552         /* need to release vmdq resource if exists */
1553         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1554                 i40e_vsi_release(pf->vmdq[i].vsi);
1555                 pf->vmdq[i].vsi = NULL;
1556         }
1557         rte_free(pf->vmdq);
1558         pf->vmdq = NULL;
1559 err:
1560         /* need to release fdir resource if exists */
1561         i40e_fdir_teardown(pf);
1562         return ret;
1563 }
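
/*
 * Application-side view of the ordering constraint described in the VMDQ
 * comment above; a hedged sketch only (queue/descriptor counts and the
 * minimal error handling are illustrative, not prescriptive):
 */
static __rte_unused int
sketch_port_bringup(uint16_t port_id, const struct rte_eth_conf *conf,
                    struct rte_mempool *mp)
{
        int ret;

        /* VMDQ VSIs are created here, inside dev_configure */
        ret = rte_eth_dev_configure(port_id, 1, 1, conf);
        if (ret < 0)
                return ret;

        /* queue setup may need to look up the owning (VMDQ) VSI */
        ret = rte_eth_rx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY, NULL, mp);
        if (ret < 0)
                return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY, NULL);
        if (ret < 0)
                return ret;

        /* RSS is programmed during start, once queue counts are final */
        return rte_eth_dev_start(port_id);
}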
1564
1565 void
1566 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1567 {
1568         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1569         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1570         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1571         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1572         uint16_t msix_vect = vsi->msix_intr;
1573         uint16_t i;
1574
1575         for (i = 0; i < vsi->nb_qps; i++) {
1576                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1577                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1578                 rte_wmb();
1579         }
1580
1581         if (vsi->type != I40E_VSI_SRIOV) {
1582                 if (!rte_intr_allow_others(intr_handle)) {
1583                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1584                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1585                         I40E_WRITE_REG(hw,
1586                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1587                                        0);
1588                 } else {
1589                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1590                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1591                         I40E_WRITE_REG(hw,
1592                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1593                                                        msix_vect - 1), 0);
1594                 }
1595         } else {
1596                 uint32_t reg;
1597                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1598                         vsi->user_param + (msix_vect - 1);
1599
1600                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1601                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1602         }
1603         I40E_WRITE_FLUSH(hw);
1604 }
1605
1606 static void
1607 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1608                        int base_queue, int nb_queue,
1609                        uint16_t itr_idx)
1610 {
1611         int i;
1612         uint32_t val;
1613         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1614
1615         /* Bind all RX queues to allocated MSIX interrupt */
1616         for (i = 0; i < nb_queue; i++) {
1617                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1618                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
1619                         ((base_queue + i + 1) <<
1620                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1621                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1622                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1623
1624                 if (i == nb_queue - 1)
1625                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1626                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1627         }
1628
1629         /* Write first RX queue to Link list register as the head element */
1630         if (vsi->type != I40E_VSI_SRIOV) {
1631                 uint16_t interval =
1632                         i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
1633
1634                 if (msix_vect == I40E_MISC_VEC_ID) {
1635                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1636                                        (base_queue <<
1637                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1638                                        (0x0 <<
1639                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1640                         I40E_WRITE_REG(hw,
1641                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1642                                        interval);
1643                 } else {
1644                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1645                                        (base_queue <<
1646                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1647                                        (0x0 <<
1648                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1649                         I40E_WRITE_REG(hw,
1650                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1651                                                        msix_vect - 1),
1652                                        interval);
1653                 }
1654         } else {
1655                 uint32_t reg;
1656
1657                 if (msix_vect == I40E_MISC_VEC_ID) {
1658                         I40E_WRITE_REG(hw,
1659                                        I40E_VPINT_LNKLST0(vsi->user_param),
1660                                        (base_queue <<
1661                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1662                                        (0x0 <<
1663                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1664                 } else {
1665                         /* num_msix_vectors_vf includes irq0, so subtract it */
1666                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1667                                 vsi->user_param + (msix_vect - 1);
1668
1669                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1670                                        (base_queue <<
1671                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1672                                        (0x0 <<
1673                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1674                 }
1675         }
1676
1677         I40E_WRITE_FLUSH(hw);
1678 }
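
/*
 * Sketch of the VPINT_LNKLSTN index math used above: vector 0 of every VF
 * is its misc vector, so each VF owns (num_msix_vectors_vf - 1) queue
 * vectors laid out contiguously by VF id (illustration only):
 */
static __rte_unused uint32_t
sketch_vf_vector_reg_idx(uint16_t num_msix_vectors_vf, uint16_t vf_id,
                         uint16_t msix_vect)
{
        /* skip the per-VF irq0, then index within this VF's block */
        return (uint32_t)(num_msix_vectors_vf - 1) * vf_id + (msix_vect - 1);
}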
1679
1680 void
1681 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
1682 {
1683         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1684         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1685         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1686         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1687         uint16_t msix_vect = vsi->msix_intr;
1688         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1689         uint16_t queue_idx = 0;
1690         int record = 0;
1691         uint32_t val;
1692         int i;
1693
1694         for (i = 0; i < vsi->nb_qps; i++) {
1695                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1696                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1697         }
1698
1699         /* Disable auto-masking so the INTENA flag is not cleared on interrupt */
1700         val = I40E_READ_REG(hw, I40E_GLINT_CTL);
1701         val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
1702                 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
1703                 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
1704         I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
1705
1706         /* VF bind interrupt */
1707         if (vsi->type == I40E_VSI_SRIOV) {
1708                 __vsi_queues_bind_intr(vsi, msix_vect,
1709                                        vsi->base_queue, vsi->nb_qps,
1710                                        itr_idx);
1711                 return;
1712         }
1713
1714         /* PF & VMDq bind interrupt */
1715         if (rte_intr_dp_is_en(intr_handle)) {
1716                 if (vsi->type == I40E_VSI_MAIN) {
1717                         queue_idx = 0;
1718                         record = 1;
1719                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1720                         struct i40e_vsi *main_vsi =
1721                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1722                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1723                         record = 1;
1724                 }
1725         }
1726
1727         for (i = 0; i < vsi->nb_used_qps; i++) {
1728                 if (nb_msix <= 1) {
1729                         if (!rte_intr_allow_others(intr_handle))
1730                                 /* allow to share MISC_VEC_ID */
1731                                 msix_vect = I40E_MISC_VEC_ID;
1732
1733                         /* not enough msix_vect, map all remaining queues to one */
1734                         __vsi_queues_bind_intr(vsi, msix_vect,
1735                                                vsi->base_queue + i,
1736                                                vsi->nb_used_qps - i,
1737                                                itr_idx);
1738                         for (; !!record && i < vsi->nb_used_qps; i++)
1739                                 intr_handle->intr_vec[queue_idx + i] =
1740                                         msix_vect;
1741                         break;
1742                 }
1743                 /* 1:1 queue/msix_vect mapping */
1744                 __vsi_queues_bind_intr(vsi, msix_vect,
1745                                        vsi->base_queue + i, 1,
1746                                        itr_idx);
1747                 if (!!record)
1748                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1749
1750                 msix_vect++;
1751                 nb_msix--;
1752         }
1753 }
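
/*
 * Sketch of the mapping policy in the loop above: queues get a 1:1 vector
 * binding while spare MSI-X vectors remain; the last vector absorbs all
 * still-unmapped queues (illustration only):
 */
static __rte_unused uint16_t
sketch_queues_on_vector(uint16_t remaining_queues, uint16_t remaining_vectors)
{
        return (remaining_vectors <= 1) ? remaining_queues : 1;
}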
1754
1755 static void
1756 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1757 {
1758         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1759         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1760         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1761         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1762         uint16_t interval = i40e_calc_itr_interval(
1763                 RTE_LIBRTE_I40E_ITR_INTERVAL);
1764         uint16_t msix_intr, i;
1765
1766         if (rte_intr_allow_others(intr_handle))
1767                 for (i = 0; i < vsi->nb_msix; i++) {
1768                         msix_intr = vsi->msix_intr + i;
1769                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1770                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1771                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1772                                 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1773                                 (interval <<
1774                                  I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
1775                 }
1776         else
1777                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1778                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1779                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1780                                (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
1781                                (interval <<
1782                                 I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
1783
1784         I40E_WRITE_FLUSH(hw);
1785 }
1786
1787 static void
1788 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1789 {
1790         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1791         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1792         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1793         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1794         uint16_t msix_intr, i;
1795
1796         if (rte_intr_allow_others(intr_handle))
1797                 for (i = 0; i < vsi->nb_msix; i++) {
1798                         msix_intr = vsi->msix_intr + i;
1799                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1800                                        0);
1801                 }
1802         else
1803                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
1804
1805         I40E_WRITE_FLUSH(hw);
1806 }
1807
1808 static inline uint8_t
1809 i40e_parse_link_speeds(uint16_t link_speeds)
1810 {
1811         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1812
1813         if (link_speeds & ETH_LINK_SPEED_40G)
1814                 link_speed |= I40E_LINK_SPEED_40GB;
1815         if (link_speeds & ETH_LINK_SPEED_25G)
1816                 link_speed |= I40E_LINK_SPEED_25GB;
1817         if (link_speeds & ETH_LINK_SPEED_20G)
1818                 link_speed |= I40E_LINK_SPEED_20GB;
1819         if (link_speeds & ETH_LINK_SPEED_10G)
1820                 link_speed |= I40E_LINK_SPEED_10GB;
1821         if (link_speeds & ETH_LINK_SPEED_1G)
1822                 link_speed |= I40E_LINK_SPEED_1GB;
1823         if (link_speeds & ETH_LINK_SPEED_100M)
1824                 link_speed |= I40E_LINK_SPEED_100MB;
1825
1826         return link_speed;
1827 }
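
/*
 * Usage note for the parser above: link_speeds is a bitmap, so several
 * speeds may be requested at once and each set bit maps onto an
 * I40E_LINK_SPEED_* bit. A hedged example (the chosen speeds are
 * illustrative only):
 */
static __rte_unused uint8_t
sketch_request_10g_40g(void)
{
        /* returns I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_40GB */
        return i40e_parse_link_speeds(ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G);
}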
1828
1829 static int
1830 i40e_phy_conf_link(struct i40e_hw *hw,
1831                    uint8_t abilities,
1832                    uint8_t force_speed,
1833                    bool is_up)
1834 {
1835         enum i40e_status_code status;
1836         struct i40e_aq_get_phy_abilities_resp phy_ab;
1837         struct i40e_aq_set_phy_config phy_conf;
1838         enum i40e_aq_phy_type cnt;
1839         uint32_t phy_type_mask = 0;
1840
1841         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
1842                         I40E_AQ_PHY_FLAG_PAUSE_RX |
1843                         I40E_AQ_PHY_FLAG_LOW_POWER;
1845         const uint8_t advt = I40E_LINK_SPEED_40GB |
1846                         I40E_LINK_SPEED_25GB |
1847                         I40E_LINK_SPEED_10GB |
1848                         I40E_LINK_SPEED_1GB |
1849                         I40E_LINK_SPEED_100MB;
1850         int ret = -ENOTSUP;
1851
1853         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
1854                                               NULL);
1855         if (status)
1856                 return ret;
1857
1858         /* If link already up, no need to set up again */
1859         if (is_up && phy_ab.phy_type != 0)
1860                 return I40E_SUCCESS;
1861
1862         memset(&phy_conf, 0, sizeof(phy_conf));
1863
1864         /* bits 0-2 use the values from get_phy_abilities_resp */
1865         abilities &= ~mask;
1866         abilities |= phy_ab.abilities & mask;
1867
1868         /* update abilities and speed */
1869         if (abilities & I40E_AQ_PHY_AN_ENABLED)
1870                 phy_conf.link_speed = advt;
1871         else
1872                 phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
1873
1874         phy_conf.abilities = abilities;
1875
1878         /* To enable link, phy_type mask needs to include each type */
1879         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
1880                 phy_type_mask |= 1 << cnt;
1881
1882         /* use get_phy_abilities_resp value for the rest */
1883         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
1884         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
1885                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
1886                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
1887         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
1888         phy_conf.eee_capability = phy_ab.eee_capability;
1889         phy_conf.eeer = phy_ab.eeer_val;
1890         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
1891
1892         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
1893                     phy_ab.abilities, phy_ab.link_speed);
1894         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
1895                     phy_conf.abilities, phy_conf.link_speed);
1896
1897         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
1898         if (status)
1899                 return ret;
1900
1901         return I40E_SUCCESS;
1902 }
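
/*
 * Sketch of the abilities merge performed above: bits covered by `mask`
 * (pause TX/RX, low power) are taken from the PHY's current state, all
 * other bits from the caller's request (illustration only):
 */
static __rte_unused uint8_t
sketch_merge_abilities(uint8_t requested, uint8_t current, uint8_t mask)
{
        return (uint8_t)((requested & ~mask) | (current & mask));
}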
1903
1904 static int
1905 i40e_apply_link_speed(struct rte_eth_dev *dev)
1906 {
1907         uint8_t speed;
1908         uint8_t abilities = 0;
1909         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1910         struct rte_eth_conf *conf = &dev->data->dev_conf;
1911
1912         speed = i40e_parse_link_speeds(conf->link_speeds);
1913         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1914         if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
1915                 abilities |= I40E_AQ_PHY_AN_ENABLED;
1916         abilities |= I40E_AQ_PHY_LINK_ENABLED;
1917
1918         return i40e_phy_conf_link(hw, abilities, speed, true);
1919 }
1920
1921 static int
1922 i40e_dev_start(struct rte_eth_dev *dev)
1923 {
1924         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1925         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1926         struct i40e_vsi *main_vsi = pf->main_vsi;
1927         int ret, i;
1928         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1929         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1930         uint32_t intr_vector = 0;
1931         struct i40e_vsi *vsi;
1932
1933         hw->adapter_stopped = 0;
1934
1935         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1936                 PMD_INIT_LOG(ERR,
1937                         "Invalid link_speeds for port %u, autonegotiation disabled",
1938                         dev->data->port_id);
1939                 return -EINVAL;
1940         }
1941
1942         rte_intr_disable(intr_handle);
1943
1944         if ((rte_intr_cap_multiple(intr_handle) ||
1945              !RTE_ETH_DEV_SRIOV(dev).active) &&
1946             dev->data->dev_conf.intr_conf.rxq != 0) {
1947                 intr_vector = dev->data->nb_rx_queues;
1948                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
1949                 if (ret)
1950                         return ret;
1951         }
1952
1953         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1954                 intr_handle->intr_vec =
1955                         rte_zmalloc("intr_vec",
1956                                     dev->data->nb_rx_queues * sizeof(int),
1957                                     0);
1958                 if (!intr_handle->intr_vec) {
1959                         PMD_INIT_LOG(ERR,
1960                                 "Failed to allocate %d rx_queues intr_vec",
1961                                 dev->data->nb_rx_queues);
1962                         return -ENOMEM;
1963                 }
1964         }
1965
1966         /* Initialize VSI */
1967         ret = i40e_dev_rxtx_init(pf);
1968         if (ret != I40E_SUCCESS) {
1969                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
1970                 goto err_up;
1971         }
1972
1973         /* Map queues with MSIX interrupt */
1974         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
1975                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1976         i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
1977         i40e_vsi_enable_queues_intr(main_vsi);
1978
1979         /* Map VMDQ VSI queues with MSIX interrupt */
1980         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1981                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1982                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
1983                                           I40E_ITR_INDEX_DEFAULT);
1984                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
1985         }
1986
1987         /* enable FDIR MSIX interrupt */
1988         if (pf->fdir.fdir_vsi) {
1989                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
1990                                           I40E_ITR_INDEX_NONE);
1991                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
1992         }
1993
1994         /* Enable all queues which have been configured */
1995         ret = i40e_dev_switch_queues(pf, TRUE);
1996         if (ret != I40E_SUCCESS) {
1997                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
1998                 goto err_up;
1999         }
2000
2001         /* Enable receiving broadcast packets */
2002         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2003         if (ret != I40E_SUCCESS)
2004                 PMD_DRV_LOG(INFO, "failed to set VSI broadcast");
2005
2006         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2007                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2008                                                 true, NULL);
2009                 if (ret != I40E_SUCCESS)
2010                         PMD_DRV_LOG(INFO, "failed to set VSI broadcast");
2011         }
2012
2013         /* Enable the VLAN promiscuous mode. */
2014         if (pf->vfs) {
2015                 for (i = 0; i < pf->vf_num; i++) {
2016                         vsi = pf->vfs[i].vsi;
2017                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2018                                                      true, NULL);
2019                 }
2020         }
2021
2022         /* Apply link configure */
2023         if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
2024                                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
2025                                 ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
2026                                 ETH_LINK_SPEED_40G)) {
2027                 PMD_DRV_LOG(ERR, "Invalid link setting");
2028                 goto err_up;
2029         }
2030         ret = i40e_apply_link_speed(dev);
2031         if (I40E_SUCCESS != ret) {
2032                 PMD_DRV_LOG(ERR, "Failed to apply link setting");
2033                 goto err_up;
2034         }
2035
2036         if (!rte_intr_allow_others(intr_handle)) {
2037                 rte_intr_callback_unregister(intr_handle,
2038                                              i40e_dev_interrupt_handler,
2039                                              (void *)dev);
2040                 /* configure and enable device interrupt */
2041                 i40e_pf_config_irq0(hw, FALSE);
2042                 i40e_pf_enable_irq0(hw);
2043
2044                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2045                         PMD_INIT_LOG(INFO,
2046                                 "lsc won't be enabled: no intr multiplexing support");
2047         } else {
2048                 ret = i40e_aq_set_phy_int_mask(hw,
2049                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2050                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2051                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2052                 if (ret != I40E_SUCCESS)
2053                         PMD_DRV_LOG(WARNING, "Failed to set PHY mask");
2054
2055                 /* Call get_link_info aq command to enable/disable LSE */
2056                 i40e_dev_link_update(dev, 0);
2057         }
2058
2059         /* enable uio intr after callback register */
2060         rte_intr_enable(intr_handle);
2061
2062         i40e_filter_restore(pf);
2063
2064         if (pf->tm_conf.root && !pf->tm_conf.committed)
2065                 PMD_DRV_LOG(WARNING,
2066                             "please call hierarchy_commit() "
2067                             "before starting the port");
2068
2069         return I40E_SUCCESS;
2070
2071 err_up:
2072         i40e_dev_switch_queues(pf, FALSE);
2073         i40e_dev_clear_queues(dev);
2074
2075         return ret;
2076 }
2077
2078 static void
2079 i40e_dev_stop(struct rte_eth_dev *dev)
2080 {
2081         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2082         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2083         struct i40e_vsi *main_vsi = pf->main_vsi;
2084         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2085         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2086         int i;
2087
2088         if (hw->adapter_stopped == 1)
2089                 return;
2090         /* Disable all queues */
2091         i40e_dev_switch_queues(pf, FALSE);
2092
2093         /* un-map queues with interrupt registers */
2094         i40e_vsi_disable_queues_intr(main_vsi);
2095         i40e_vsi_queues_unbind_intr(main_vsi);
2096
2097         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2098                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2099                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2100         }
2101
2102         if (pf->fdir.fdir_vsi) {
2103                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2104                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2105         }
2106         /* Clear all queues and release memory */
2107         i40e_dev_clear_queues(dev);
2108
2109         /* Set link down */
2110         i40e_dev_set_link_down(dev);
2111
2112         if (!rte_intr_allow_others(intr_handle))
2113                 /* resume to the default handler */
2114                 rte_intr_callback_register(intr_handle,
2115                                            i40e_dev_interrupt_handler,
2116                                            (void *)dev);
2117
2118         /* Clean datapath event and queue/vec mapping */
2119         rte_intr_efd_disable(intr_handle);
2120         if (intr_handle->intr_vec) {
2121                 rte_free(intr_handle->intr_vec);
2122                 intr_handle->intr_vec = NULL;
2123         }
2124
2125         /* reset hierarchy commit */
2126         pf->tm_conf.committed = false;
2127
2128         /* Remove all the queue region configuration */
2129         i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
2130
2131         hw->adapter_stopped = 1;
2132 }
2133
2134 static void
2135 i40e_dev_close(struct rte_eth_dev *dev)
2136 {
2137         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2138         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2139         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2140         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2141         struct i40e_mirror_rule *p_mirror;
2142         uint32_t reg;
2143         int i;
2144         int ret;
2145
2146         PMD_INIT_FUNC_TRACE();
2147
2148         i40e_dev_stop(dev);
2149
2150         /* Remove all mirror rules */
2151         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2152                 ret = i40e_aq_del_mirror_rule(hw,
2153                                               pf->main_vsi->veb->seid,
2154                                               p_mirror->rule_type,
2155                                               p_mirror->entries,
2156                                               p_mirror->num_entries,
2157                                               p_mirror->id);
2158                 if (ret < 0)
2159                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2160                                     "status = %d, aq_err = %d.", ret,
2161                                     hw->aq.asq_last_status);
2162
2163                 /* remove mirror software resource anyway */
2164                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2165                 rte_free(p_mirror);
2166                 pf->nb_mirror_rule--;
2167         }
2168
2169         i40e_dev_free_queues(dev);
2170
2171         /* Disable interrupt */
2172         i40e_pf_disable_irq0(hw);
2173         rte_intr_disable(intr_handle);
2174
2175         /* shutdown and destroy the HMC */
2176         i40e_shutdown_lan_hmc(hw);
2177
2178         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2179                 i40e_vsi_release(pf->vmdq[i].vsi);
2180                 pf->vmdq[i].vsi = NULL;
2181         }
2182         rte_free(pf->vmdq);
2183         pf->vmdq = NULL;
2184
2185         /* release all the existing VSIs and VEBs */
2186         i40e_fdir_teardown(pf);
2187         i40e_vsi_release(pf->main_vsi);
2188
2189         /* shutdown the adminq */
2190         i40e_aq_queue_shutdown(hw, true);
2191         i40e_shutdown_adminq(hw);
2192
2193         i40e_res_pool_destroy(&pf->qp_pool);
2194         i40e_res_pool_destroy(&pf->msix_pool);
2195
2196         /* force a PF reset to clean anything leftover */
2197         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2198         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2199                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2200         I40E_WRITE_FLUSH(hw);
2201 }
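
/*
 * The PF software reset above is the read-modify-write idiom used
 * throughout this file (compare the S-TAG disable in dev_init): read the
 * register, OR in the bit, write it back, flush posted writes. Generic
 * sketch (illustration only):
 */
static __rte_unused void
sketch_reg_set_bit(struct i40e_hw *hw, uint32_t reg_addr, uint32_t bit_mask)
{
        uint32_t reg = I40E_READ_REG(hw, reg_addr);

        I40E_WRITE_REG(hw, reg_addr, reg | bit_mask);
        I40E_WRITE_FLUSH(hw);
}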
2202
2203 /*
2204  * Reset PF device only to re-initialize resources in PMD layer
2205  */
2206 static int
2207 i40e_dev_reset(struct rte_eth_dev *dev)
2208 {
2209         int ret;
2210
2211         /* When a DPDK PMD PF begins to reset the PF port, it should notify
2212          * all of its VFs so they stay aligned with it. The notification
2213          * mechanism is PMD specific and, for the i40e PF, rather complex.
2214          * To avoid unexpected behavior in the VFs, reset of a PF with
2215          * SR-IOV active is currently not supported. It might be supported later.
2216          */
2217         if (dev->data->sriov.active)
2218                 return -ENOTSUP;
2219
2220         ret = eth_i40e_dev_uninit(dev);
2221         if (ret)
2222                 return ret;
2223
2224         ret = eth_i40e_dev_init(dev);
2225
2226         return ret;
2227 }
2228
2229 static void
2230 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2231 {
2232         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2233         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2234         struct i40e_vsi *vsi = pf->main_vsi;
2235         int status;
2236
2237         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2238                                                      true, NULL, true);
2239         if (status != I40E_SUCCESS)
2240                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2241
2242         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2243                                                         TRUE, NULL);
2244         if (status != I40E_SUCCESS)
2245                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2246
2247 }
2248
2249 static void
2250 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2251 {
2252         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2253         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2254         struct i40e_vsi *vsi = pf->main_vsi;
2255         int status;
2256
2257         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2258                                                      false, NULL, true);
2259         if (status != I40E_SUCCESS)
2260                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2261
2262         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2263                                                         false, NULL);
2264         if (status != I40E_SUCCESS)
2265                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2266 }
2267
2268 static void
2269 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2270 {
2271         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2272         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2273         struct i40e_vsi *vsi = pf->main_vsi;
2274         int ret;
2275
2276         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2277         if (ret != I40E_SUCCESS)
2278                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2279 }
2280
2281 static void
2282 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2283 {
2284         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2285         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2286         struct i40e_vsi *vsi = pf->main_vsi;
2287         int ret;
2288
2289         if (dev->data->promiscuous == 1)
2290                 return; /* must remain in all_multicast mode */
2291
2292         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2293                                 vsi->seid, FALSE, NULL);
2294         if (ret != I40E_SUCCESS)
2295                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2296 }
2297
2298 /*
2299  * Set device link up.
2300  */
2301 static int
2302 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2303 {
2304         /* re-apply link speed setting */
2305         return i40e_apply_link_speed(dev);
2306 }
2307
2308 /*
2309  * Set device link down.
2310  */
2311 static int
2312 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2313 {
2314         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2315         uint8_t abilities = 0;
2316         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2317
2318         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2319         return i40e_phy_conf_link(hw, abilities, speed, false);
2320 }
2321
2322 int
2323 i40e_dev_link_update(struct rte_eth_dev *dev,
2324                      int wait_to_complete)
2325 {
2326 #define CHECK_INTERVAL 100  /* 100ms */
2327 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
2328         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2329         struct i40e_link_status link_status;
2330         struct rte_eth_link link, old;
2331         int status;
2332         unsigned rep_cnt = MAX_REPEAT_TIME;
2333         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2334
2335         memset(&link, 0, sizeof(link));
2336         memset(&old, 0, sizeof(old));
2337         memset(&link_status, 0, sizeof(link_status));
2338         rte_i40e_dev_atomic_read_link_status(dev, &old);
2339
2340         do {
2341                 /* Get link status information from hardware */
2342                 status = i40e_aq_get_link_info(hw, enable_lse,
2343                                                 &link_status, NULL);
2344                 if (status != I40E_SUCCESS) {
2345                         link.link_speed = ETH_SPEED_NUM_100M;
2346                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2347                         PMD_DRV_LOG(ERR, "Failed to get link info");
2348                         goto out;
2349                 }
2350
2351                 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
2352                 if (!wait_to_complete || link.link_status)
2353                         break;
2354
2355                 rte_delay_ms(CHECK_INTERVAL);
2356         } while (--rep_cnt);
2357
2358         if (!link.link_status)
2359                 goto out;
2360
2361         /* i40e uses full duplex only */
2362         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2363
2364         /* Parse the link status */
2365         switch (link_status.link_speed) {
2366         case I40E_LINK_SPEED_100MB:
2367                 link.link_speed = ETH_SPEED_NUM_100M;
2368                 break;
2369         case I40E_LINK_SPEED_1GB:
2370                 link.link_speed = ETH_SPEED_NUM_1G;
2371                 break;
2372         case I40E_LINK_SPEED_10GB:
2373                 link.link_speed = ETH_SPEED_NUM_10G;
2374                 break;
2375         case I40E_LINK_SPEED_20GB:
2376                 link.link_speed = ETH_SPEED_NUM_20G;
2377                 break;
2378         case I40E_LINK_SPEED_25GB:
2379                 link.link_speed = ETH_SPEED_NUM_25G;
2380                 break;
2381         case I40E_LINK_SPEED_40GB:
2382                 link.link_speed = ETH_SPEED_NUM_40G;
2383                 break;
2384         default:
2385                 link.link_speed = ETH_SPEED_NUM_100M;
2386                 break;
2387         }
2388
2389         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2390                         ETH_LINK_SPEED_FIXED);
2391
2392 out:
2393         rte_i40e_dev_atomic_write_link_status(dev, &link);
2394         if (link.link_status == old.link_status)
2395                 return -1;
2396
2397         i40e_notify_all_vfs_link_status(dev);
2398
2399         return 0;
2400 }
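
/*
 * The wait loop above polls hardware every CHECK_INTERVAL ms for at most
 * MAX_REPEAT_TIME rounds (~1s total). Generic sketch of the idiom with a
 * hypothetical predicate; max_tries must be >= 1 (illustration only):
 */
static __rte_unused bool
sketch_poll_until(bool (*done)(void *), void *arg,
                  unsigned int interval_ms, unsigned int max_tries)
{
        do {
                if (done(arg))
                        return true;
                rte_delay_ms(interval_ms);
        } while (--max_tries);

        return false;
}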
2401
2402 /* Get all the statistics of a VSI */
2403 void
2404 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2405 {
2406         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2407         struct i40e_eth_stats *nes = &vsi->eth_stats;
2408         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2409         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2410
2411         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2412                             vsi->offset_loaded, &oes->rx_bytes,
2413                             &nes->rx_bytes);
2414         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2415                             vsi->offset_loaded, &oes->rx_unicast,
2416                             &nes->rx_unicast);
2417         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2418                             vsi->offset_loaded, &oes->rx_multicast,
2419                             &nes->rx_multicast);
2420         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2421                             vsi->offset_loaded, &oes->rx_broadcast,
2422                             &nes->rx_broadcast);
2423         /* exclude CRC bytes */
2424         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2425                 nes->rx_broadcast) * ETHER_CRC_LEN;
2426
2427         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2428                             &oes->rx_discards, &nes->rx_discards);
2429         /* GLV_REPC not supported */
2430         /* GLV_RMPC not supported */
2431         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2432                             &oes->rx_unknown_protocol,
2433                             &nes->rx_unknown_protocol);
2434         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2435                             vsi->offset_loaded, &oes->tx_bytes,
2436                             &nes->tx_bytes);
2437         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2438                             vsi->offset_loaded, &oes->tx_unicast,
2439                             &nes->tx_unicast);
2440         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2441                             vsi->offset_loaded, &oes->tx_multicast,
2442                             &nes->tx_multicast);
2443         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2444                             vsi->offset_loaded,  &oes->tx_broadcast,
2445                             vsi->offset_loaded, &oes->tx_broadcast,
2446         /* GLV_TDPC not supported */
2447         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2448                             &oes->tx_errors, &nes->tx_errors);
2449         vsi->offset_loaded = true;
2450
2451         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2452                     vsi->vsi_id);
2453         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2454         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2455         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2456         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2457         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2458         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2459                     nes->rx_unknown_protocol);
2460         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2461         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2462         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2463         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2464         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2465         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2466         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2467                     vsi->vsi_id);
2468 }
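
/*
 * Sketch of what i40e_stat_update_48() computes for each counter pair
 * above: the hardware keeps 48-bit rolling counters split across H/L
 * registers, and the delta against the saved offset is reduced modulo
 * 2^48 so a single wrap is handled (illustration only; the real helper
 * lives with the other stats code):
 */
static __rte_unused uint64_t
sketch_stat_delta_48(uint64_t new_val, uint64_t offset)
{
        /* unsigned subtraction wraps, so masking to 48 bits gives the delta */
        return (new_val - offset) & ((1ULL << 48) - 1);
}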
2469
2470 static void
2471 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2472 {
2473         unsigned int i;
2474         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2475         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2476
2477         /* Get rx/tx bytes of internal transfer packets */
2478         i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2479                         I40E_GLV_GORCL(hw->port),
2480                         pf->offset_loaded,
2481                         &pf->internal_stats_offset.rx_bytes,
2482                         &pf->internal_stats.rx_bytes);
2483
2484         i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2485                         I40E_GLV_GOTCL(hw->port),
2486                         pf->offset_loaded,
2487                         &pf->internal_stats_offset.tx_bytes,
2488                         &pf->internal_stats.tx_bytes);
2489         /* Get total internal rx packet count */
2490         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
2491                             I40E_GLV_UPRCL(hw->port),
2492                             pf->offset_loaded,
2493                             &pf->internal_stats_offset.rx_unicast,
2494                             &pf->internal_stats.rx_unicast);
2495         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
2496                             I40E_GLV_MPRCL(hw->port),
2497                             pf->offset_loaded,
2498                             &pf->internal_stats_offset.rx_multicast,
2499                             &pf->internal_stats.rx_multicast);
2500         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
2501                             I40E_GLV_BPRCL(hw->port),
2502                             pf->offset_loaded,
2503                             &pf->internal_stats_offset.rx_broadcast,
2504                             &pf->internal_stats.rx_broadcast);
2505
2506         /* exclude CRC size */
2507         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
2508                 pf->internal_stats.rx_multicast +
2509                 pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
2510
2511         /* Get statistics of struct i40e_eth_stats */
2512         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2513                             I40E_GLPRT_GORCL(hw->port),
2514                             pf->offset_loaded, &os->eth.rx_bytes,
2515                             &ns->eth.rx_bytes);
2516         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2517                             I40E_GLPRT_UPRCL(hw->port),
2518                             pf->offset_loaded, &os->eth.rx_unicast,
2519                             &ns->eth.rx_unicast);
2520         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2521                             I40E_GLPRT_MPRCL(hw->port),
2522                             pf->offset_loaded, &os->eth.rx_multicast,
2523                             &ns->eth.rx_multicast);
2524         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2525                             I40E_GLPRT_BPRCL(hw->port),
2526                             pf->offset_loaded, &os->eth.rx_broadcast,
2527                             &ns->eth.rx_broadcast);
2528         /* Workaround: CRC size should not be included in byte statistics,
2529          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2530          */
2531         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2532                 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2533
        /* Exclude internal rx bytes.
         * Workaround: it is possible that I40E_GLV_GORC[H/L] is updated
         * before I40E_GLPRT_GORC[H/L], so there is a small window in which
         * the subtraction below could yield a negative value.
         */
        if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
                ns->eth.rx_bytes = 0;
        else
                ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
2543
2544         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2545                             pf->offset_loaded, &os->eth.rx_discards,
2546                             &ns->eth.rx_discards);
2547         /* GLPRT_REPC not supported */
2548         /* GLPRT_RMPC not supported */
2549         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2550                             pf->offset_loaded,
2551                             &os->eth.rx_unknown_protocol,
2552                             &ns->eth.rx_unknown_protocol);
2553         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2554                             I40E_GLPRT_GOTCL(hw->port),
2555                             pf->offset_loaded, &os->eth.tx_bytes,
2556                             &ns->eth.tx_bytes);
2557         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2558                             I40E_GLPRT_UPTCL(hw->port),
2559                             pf->offset_loaded, &os->eth.tx_unicast,
2560                             &ns->eth.tx_unicast);
2561         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2562                             I40E_GLPRT_MPTCL(hw->port),
2563                             pf->offset_loaded, &os->eth.tx_multicast,
2564                             &ns->eth.tx_multicast);
2565         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2566                             I40E_GLPRT_BPTCL(hw->port),
2567                             pf->offset_loaded, &os->eth.tx_broadcast,
2568                             &ns->eth.tx_broadcast);
2569         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2570                 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2571
2572         /* exclude internal tx bytes */
2573         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
2574                 ns->eth.tx_bytes = 0;
2575         else
2576                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
2577
2578         /* GLPRT_TEPC not supported */
2579
2580         /* additional port specific stats */
2581         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2582                             pf->offset_loaded, &os->tx_dropped_link_down,
2583                             &ns->tx_dropped_link_down);
2584         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2585                             pf->offset_loaded, &os->crc_errors,
2586                             &ns->crc_errors);
2587         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2588                             pf->offset_loaded, &os->illegal_bytes,
2589                             &ns->illegal_bytes);
2590         /* GLPRT_ERRBC not supported */
2591         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2592                             pf->offset_loaded, &os->mac_local_faults,
2593                             &ns->mac_local_faults);
2594         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2595                             pf->offset_loaded, &os->mac_remote_faults,
2596                             &ns->mac_remote_faults);
2597         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2598                             pf->offset_loaded, &os->rx_length_errors,
2599                             &ns->rx_length_errors);
2600         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2601                             pf->offset_loaded, &os->link_xon_rx,
2602                             &ns->link_xon_rx);
2603         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2604                             pf->offset_loaded, &os->link_xoff_rx,
2605                             &ns->link_xoff_rx);
2606         for (i = 0; i < 8; i++) {
2607                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2608                                     pf->offset_loaded,
2609                                     &os->priority_xon_rx[i],
2610                                     &ns->priority_xon_rx[i]);
2611                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2612                                     pf->offset_loaded,
2613                                     &os->priority_xoff_rx[i],
2614                                     &ns->priority_xoff_rx[i]);
2615         }
2616         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2617                             pf->offset_loaded, &os->link_xon_tx,
2618                             &ns->link_xon_tx);
2619         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2620                             pf->offset_loaded, &os->link_xoff_tx,
2621                             &ns->link_xoff_tx);
2622         for (i = 0; i < 8; i++) {
2623                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2624                                     pf->offset_loaded,
2625                                     &os->priority_xon_tx[i],
2626                                     &ns->priority_xon_tx[i]);
2627                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2628                                     pf->offset_loaded,
2629                                     &os->priority_xoff_tx[i],
2630                                     &ns->priority_xoff_tx[i]);
2631                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2632                                     pf->offset_loaded,
2633                                     &os->priority_xon_2_xoff[i],
2634                                     &ns->priority_xon_2_xoff[i]);
2635         }
2636         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2637                             I40E_GLPRT_PRC64L(hw->port),
2638                             pf->offset_loaded, &os->rx_size_64,
2639                             &ns->rx_size_64);
2640         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2641                             I40E_GLPRT_PRC127L(hw->port),
2642                             pf->offset_loaded, &os->rx_size_127,
2643                             &ns->rx_size_127);
2644         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2645                             I40E_GLPRT_PRC255L(hw->port),
2646                             pf->offset_loaded, &os->rx_size_255,
2647                             &ns->rx_size_255);
2648         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2649                             I40E_GLPRT_PRC511L(hw->port),
2650                             pf->offset_loaded, &os->rx_size_511,
2651                             &ns->rx_size_511);
2652         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2653                             I40E_GLPRT_PRC1023L(hw->port),
2654                             pf->offset_loaded, &os->rx_size_1023,
2655                             &ns->rx_size_1023);
2656         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2657                             I40E_GLPRT_PRC1522L(hw->port),
2658                             pf->offset_loaded, &os->rx_size_1522,
2659                             &ns->rx_size_1522);
2660         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2661                             I40E_GLPRT_PRC9522L(hw->port),
2662                             pf->offset_loaded, &os->rx_size_big,
2663                             &ns->rx_size_big);
2664         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2665                             pf->offset_loaded, &os->rx_undersize,
2666                             &ns->rx_undersize);
2667         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2668                             pf->offset_loaded, &os->rx_fragments,
2669                             &ns->rx_fragments);
2670         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2671                             pf->offset_loaded, &os->rx_oversize,
2672                             &ns->rx_oversize);
2673         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2674                             pf->offset_loaded, &os->rx_jabber,
2675                             &ns->rx_jabber);
2676         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2677                             I40E_GLPRT_PTC64L(hw->port),
2678                             pf->offset_loaded, &os->tx_size_64,
2679                             &ns->tx_size_64);
2680         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2681                             I40E_GLPRT_PTC127L(hw->port),
2682                             pf->offset_loaded, &os->tx_size_127,
2683                             &ns->tx_size_127);
2684         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2685                             I40E_GLPRT_PTC255L(hw->port),
2686                             pf->offset_loaded, &os->tx_size_255,
2687                             &ns->tx_size_255);
2688         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2689                             I40E_GLPRT_PTC511L(hw->port),
2690                             pf->offset_loaded, &os->tx_size_511,
2691                             &ns->tx_size_511);
2692         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2693                             I40E_GLPRT_PTC1023L(hw->port),
2694                             pf->offset_loaded, &os->tx_size_1023,
2695                             &ns->tx_size_1023);
2696         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2697                             I40E_GLPRT_PTC1522L(hw->port),
2698                             pf->offset_loaded, &os->tx_size_1522,
2699                             &ns->tx_size_1522);
2700         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2701                             I40E_GLPRT_PTC9522L(hw->port),
2702                             pf->offset_loaded, &os->tx_size_big,
2703                             &ns->tx_size_big);
2704         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2705                            pf->offset_loaded,
2706                            &os->fd_sb_match, &ns->fd_sb_match);
2707         /* GLPRT_MSPDC not supported */
2708         /* GLPRT_XEC not supported */
2709
2710         pf->offset_loaded = true;
2711
2712         if (pf->main_vsi)
2713                 i40e_update_vsi_stats(pf->main_vsi);
2714 }
2715
2716 /* Get all statistics of a port */
2717 static int
2718 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2719 {
2720         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2721         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2722         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
        unsigned int i;
2724
        /* Reading the registers refreshes pf->stats; the values are then
         * copied into the rte_eth_stats structure below.
         */
2726         i40e_read_stats_registers(pf, hw);
2727
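        /* ipackets/opackets are derived from the unicast, multicast and
         * broadcast counters; rx discards at both the port and the main
         * VSI level are subtracted, since those frames were never
         * delivered to the application.
         */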
2728         stats->ipackets = ns->eth.rx_unicast +
2729                         ns->eth.rx_multicast +
2730                         ns->eth.rx_broadcast -
2731                         ns->eth.rx_discards -
2732                         pf->main_vsi->eth_stats.rx_discards;
2733         stats->opackets = ns->eth.tx_unicast +
2734                         ns->eth.tx_multicast +
2735                         ns->eth.tx_broadcast;
2736         stats->ibytes   = ns->eth.rx_bytes;
2737         stats->obytes   = ns->eth.tx_bytes;
2738         stats->oerrors  = ns->eth.tx_errors +
2739                         pf->main_vsi->eth_stats.tx_errors;
2740
2741         /* Rx Errors */
2742         stats->imissed  = ns->eth.rx_discards +
2743                         pf->main_vsi->eth_stats.rx_discards;
2744         stats->ierrors  = ns->crc_errors +
2745                         ns->rx_length_errors + ns->rx_undersize +
2746                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
2747
2748         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2749         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
2750         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
2751         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
2752         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
2753         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
2754         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2755                     ns->eth.rx_unknown_protocol);
2756         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
2757         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
2758         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
2759         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
2760         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
2761         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
2762
2763         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
2764                     ns->tx_dropped_link_down);
2765         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
2766         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
2767                     ns->illegal_bytes);
2768         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
2769         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
2770                     ns->mac_local_faults);
2771         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
2772                     ns->mac_remote_faults);
2773         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
2774                     ns->rx_length_errors);
2775         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
2776         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
2777         for (i = 0; i < 8; i++) {
2778                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
2779                                 i, ns->priority_xon_rx[i]);
2780                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
2781                                 i, ns->priority_xoff_rx[i]);
2782         }
2783         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
2784         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
2785         for (i = 0; i < 8; i++) {
2786                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
2787                                 i, ns->priority_xon_tx[i]);
2788                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
2789                                 i, ns->priority_xoff_tx[i]);
2790                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
2791                                 i, ns->priority_xon_2_xoff[i]);
2792         }
2793         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
2794         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
2795         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
2796         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
2797         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
2798         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
2799         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
2800         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
2801         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
2802         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
2803         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
2804         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
2805         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
2806         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
2807         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
2808         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
2809         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
2810         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
2811         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
2812                         ns->mac_short_packet_dropped);
2813         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
2814                     ns->checksum_error);
2815         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
2816         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
2817         return 0;
2818 }
2819
2820 /* Reset the statistics */
2821 static void
2822 i40e_dev_stats_reset(struct rte_eth_dev *dev)
2823 {
2824         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2825         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2826
2827         /* Mark PF and VSI stats to update the offset, aka "reset" */
2828         pf->offset_loaded = false;
2829         if (pf->main_vsi)
2830                 pf->main_vsi->offset_loaded = false;
2831
        /* Read the stats; with offset_loaded cleared, this latches the
         * current register values as the new offsets.
         */
2833         i40e_read_stats_registers(pf, hw);
2834 }
2835
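/* Number of extended stats exposed: the i40e_eth_stats fields, the extra
 * HW port stats, plus one RX and one TX priority-flow-control stat set
 * per traffic-class priority (8 priorities each).
 */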
2836 static uint32_t
2837 i40e_xstats_calc_num(void)
2838 {
2839         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
2840                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
2841                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
2842 }
2843
2844 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
2845                                      struct rte_eth_xstat_name *xstats_names,
                                     __rte_unused unsigned int limit)
2847 {
        unsigned int count = 0;
        unsigned int i, prio;
2850
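        /* Per the ethdev convention, a NULL xstats_names pointer is only
         * a query for the number of entries the caller must allocate.
         */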
2851         if (xstats_names == NULL)
2852                 return i40e_xstats_calc_num();
2853
        /* Note: limit checked in rte_eth_xstats_get_names() */
2855
2856         /* Get stats from i40e_eth_stats struct */
2857         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2858                 snprintf(xstats_names[count].name,
2859                          sizeof(xstats_names[count].name),
2860                          "%s", rte_i40e_stats_strings[i].name);
2861                 count++;
2862         }
2863
        /* Get individual stats from the i40e_hw_port struct */
2865         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2866                 snprintf(xstats_names[count].name,
2867                         sizeof(xstats_names[count].name),
2868                          "%s", rte_i40e_hw_port_strings[i].name);
2869                 count++;
2870         }
2871
2872         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2873                 for (prio = 0; prio < 8; prio++) {
2874                         snprintf(xstats_names[count].name,
2875                                  sizeof(xstats_names[count].name),
2876                                  "rx_priority%u_%s", prio,
2877                                  rte_i40e_rxq_prio_strings[i].name);
2878                         count++;
2879                 }
2880         }
2881
2882         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2883                 for (prio = 0; prio < 8; prio++) {
2884                         snprintf(xstats_names[count].name,
2885                                  sizeof(xstats_names[count].name),
2886                                  "tx_priority%u_%s", prio,
2887                                  rte_i40e_txq_prio_strings[i].name);
2888                         count++;
2889                 }
2890         }
2891         return count;
2892 }
2893
2894 static int
2895 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                    unsigned int n)
2897 {
2898         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2899         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        unsigned int i, count, prio;
2901         struct i40e_hw_port_stats *hw_stats = &pf->stats;
2902
2903         count = i40e_xstats_calc_num();
2904         if (n < count)
2905                 return count;
2906
2907         i40e_read_stats_registers(pf, hw);
2908
2909         if (xstats == NULL)
2910                 return 0;
2911
2912         count = 0;
2913
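        /* Each rte_i40e_*_strings[] entry pairs a stat name with its byte
         * offset inside the stats structure, so values are fetched with
         * pointer arithmetic instead of one accessor per field; the
         * per-priority stats additionally step through an array of eight
         * uint64_t counters.
         */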
2914         /* Get stats from i40e_eth_stats struct */
2915         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2916                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
2917                         rte_i40e_stats_strings[i].offset);
2918                 xstats[count].id = count;
2919                 count++;
2920         }
2921
        /* Get individual stats from the i40e_hw_port struct */
2923         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2924                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2925                         rte_i40e_hw_port_strings[i].offset);
2926                 xstats[count].id = count;
2927                 count++;
2928         }
2929
2930         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2931                 for (prio = 0; prio < 8; prio++) {
2932                         xstats[count].value =
2933                                 *(uint64_t *)(((char *)hw_stats) +
2934                                 rte_i40e_rxq_prio_strings[i].offset +
2935                                 (sizeof(uint64_t) * prio));
2936                         xstats[count].id = count;
2937                         count++;
2938                 }
2939         }
2940
2941         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2942                 for (prio = 0; prio < 8; prio++) {
2943                         xstats[count].value =
2944                                 *(uint64_t *)(((char *)hw_stats) +
2945                                 rte_i40e_txq_prio_strings[i].offset +
2946                                 (sizeof(uint64_t) * prio));
2947                         xstats[count].id = count;
2948                         count++;
2949                 }
2950         }
2951
2952         return count;
2953 }
2954
2955 static int
2956 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
2957                                  __rte_unused uint16_t queue_id,
2958                                  __rte_unused uint8_t stat_idx,
2959                                  __rte_unused uint8_t is_rx)
2960 {
2961         PMD_INIT_FUNC_TRACE();
2962
2963         return -ENOSYS;
2964 }
2965
2966 static int
2967 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2968 {
2969         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2970         u32 full_ver;
2971         u8 ver, patch;
2972         u16 build;
2973         int ret;
2974
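        /* hw->nvm.oem_ver packs the OEM version, as unpacked below, as
         * [31:24] = version, [23:8] = build, [7:0] = patch, while
         * hw->nvm.version carries the NVM map version in bits [15:12]
         * (major) and [11:4]/[3:0] (minor digits).
         */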
2975         full_ver = hw->nvm.oem_ver;
2976         ver = (u8)(full_ver >> 24);
2977         build = (u16)((full_ver >> 8) & 0xffff);
2978         patch = (u8)(full_ver & 0xff);
2979
2980         ret = snprintf(fw_version, fw_size,
2981                  "%d.%d%d 0x%08x %d.%d.%d",
2982                  ((hw->nvm.version >> 12) & 0xf),
2983                  ((hw->nvm.version >> 4) & 0xff),
2984                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
2985                  ver, build, patch);
2986
2987         ret += 1; /* add the size of '\0' */
2988         if (fw_size < (u32)ret)
2989                 return ret;
2990         else
2991                 return 0;
2992 }
2993
2994 static void
2995 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2996 {
2997         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2998         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2999         struct i40e_vsi *vsi = pf->main_vsi;
3000         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3001
3002         dev_info->pci_dev = pci_dev;
3003         dev_info->max_rx_queues = vsi->nb_qps;
3004         dev_info->max_tx_queues = vsi->nb_qps;
3005         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3006         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3007         dev_info->max_mac_addrs = vsi->max_macaddrs;
3008         dev_info->max_vfs = pci_dev->max_vfs;
3009         dev_info->rx_offload_capa =
3010                 DEV_RX_OFFLOAD_VLAN_STRIP |
3011                 DEV_RX_OFFLOAD_QINQ_STRIP |
3012                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3013                 DEV_RX_OFFLOAD_UDP_CKSUM |
3014                 DEV_RX_OFFLOAD_TCP_CKSUM;
3015         dev_info->tx_offload_capa =
3016                 DEV_TX_OFFLOAD_VLAN_INSERT |
3017                 DEV_TX_OFFLOAD_QINQ_INSERT |
3018                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3019                 DEV_TX_OFFLOAD_UDP_CKSUM |
3020                 DEV_TX_OFFLOAD_TCP_CKSUM |
3021                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3022                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3023                 DEV_TX_OFFLOAD_TCP_TSO |
3024                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3025                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3026                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3027                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
3028         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3029                                                 sizeof(uint32_t);
3030         dev_info->reta_size = pf->hash_lut_size;
3031         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3032
3033         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3034                 .rx_thresh = {
3035                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3036                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3037                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3038                 },
3039                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3040                 .rx_drop_en = 0,
3041         };
3042
3043         dev_info->default_txconf = (struct rte_eth_txconf) {
3044                 .tx_thresh = {
3045                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3046                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3047                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3048                 },
3049                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3050                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3051                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3052                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3053         };
3054
3055         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3056                 .nb_max = I40E_MAX_RING_DESC,
3057                 .nb_min = I40E_MIN_RING_DESC,
3058                 .nb_align = I40E_ALIGN_RING_DESC,
3059         };
3060
3061         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3062                 .nb_max = I40E_MAX_RING_DESC,
3063                 .nb_min = I40E_MIN_RING_DESC,
3064                 .nb_align = I40E_ALIGN_RING_DESC,
3065                 .nb_seg_max = I40E_TX_MAX_SEG,
3066                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3067         };
3068
3069         if (pf->flags & I40E_FLAG_VMDQ) {
3070                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3071                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3072                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3073                                                 pf->max_nb_vmdq_vsi;
3074                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3075                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3076                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3077         }
3078
3079         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
3080                 /* For XL710 */
3081                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3082         else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
3083                 /* For XXV710 */
3084                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3085         else
3086                 /* For X710 */
3087                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3088 }
3089
3090 static int
3091 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3092 {
3093         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3094         struct i40e_vsi *vsi = pf->main_vsi;
3095         PMD_INIT_FUNC_TRACE();
3096
3097         if (on)
3098                 return i40e_vsi_add_vlan(vsi, vlan_id);
3099         else
3100                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3101 }
3102
3103 static int
3104 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3105                                 enum rte_vlan_type vlan_type,
3106                                 uint16_t tpid, int qinq)
3107 {
3108         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3109         uint64_t reg_r = 0;
3110         uint64_t reg_w = 0;
3111         uint16_t reg_id = 3;
3112         int ret;
3113
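        /* GL_SWT_L2TAGCTRL index selection: entry 3 holds the ethertype
         * of the single (or inner) VLAN tag; in QinQ mode the outer tag's
         * ethertype is kept in entry 2 instead.
         */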
3114         if (qinq) {
3115                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3116                         reg_id = 2;
3117         }
3118
3119         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3120                                           &reg_r, NULL);
3121         if (ret != I40E_SUCCESS) {
3122                 PMD_DRV_LOG(ERR,
3123                            "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3124                            reg_id);
3125                 return -EIO;
3126         }
3127         PMD_DRV_LOG(DEBUG,
3128                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3129                     reg_id, reg_r);
3130
3131         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3132         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3133         if (reg_r == reg_w) {
3134                 PMD_DRV_LOG(DEBUG, "No need to write");
3135                 return 0;
3136         }
3137
3138         ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3139                                            reg_w, NULL);
3140         if (ret != I40E_SUCCESS) {
3141                 PMD_DRV_LOG(ERR,
3142                             "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3143                             reg_id);
3144                 return -EIO;
3145         }
3146         PMD_DRV_LOG(DEBUG,
3147                     "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
3148                     reg_w, reg_id);
3149
3150         return 0;
3151 }
3152
3153 static int
3154 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3155                    enum rte_vlan_type vlan_type,
3156                    uint16_t tpid)
3157 {
3158         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3159         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
3160         int ret = 0;
3161
3162         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3163              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3164             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3165                 PMD_DRV_LOG(ERR,
3166                             "Unsupported vlan type.");
3167                 return -EINVAL;
3168         }
        /* Support for 802.1ad frames was added in NVM API 1.7 */
3170         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3171                 if (qinq) {
3172                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3173                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3174                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3175                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3176                 } else {
3177                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3178                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3179                 }
3180                 ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
3181                 if (ret != I40E_SUCCESS) {
3182                         PMD_DRV_LOG(ERR,
3183                                     "Set switch config failed aq_err: %d",
3184                                     hw->aq.asq_last_status);
3185                         ret = -EIO;
3186                 }
        } else {
                /* If NVM API < 1.7, keep the register setting */
                ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
                                                      tpid, qinq);
        }
3191
3192         return ret;
3193 }
3194
3195 static int
3196 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3197 {
3198         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3199         struct i40e_vsi *vsi = pf->main_vsi;
3200
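        /* mask carries the ETH_VLAN_*_MASK bits the caller wants
         * refreshed; each selected offload is re-applied from the current
         * rxmode configuration.
         */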
3201         if (mask & ETH_VLAN_FILTER_MASK) {
3202                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3203                         i40e_vsi_config_vlan_filter(vsi, TRUE);
3204                 else
3205                         i40e_vsi_config_vlan_filter(vsi, FALSE);
3206         }
3207
3208         if (mask & ETH_VLAN_STRIP_MASK) {
3209                 /* Enable or disable VLAN stripping */
3210                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
3211                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
3212                 else
3213                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
3214         }
3215
3216         if (mask & ETH_VLAN_EXTEND_MASK) {
3217                 if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
3218                         i40e_vsi_config_double_vlan(vsi, TRUE);
3219                         /* Set global registers with default ethertype. */
3220                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3221                                            ETHER_TYPE_VLAN);
3222                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3223                                            ETHER_TYPE_VLAN);
                } else {
                        i40e_vsi_config_double_vlan(vsi, FALSE);
                }
3227         }
3228
3229         return 0;
3230 }
3231
3232 static void
3233 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3234                           __rte_unused uint16_t queue,
3235                           __rte_unused int on)
3236 {
3237         PMD_INIT_FUNC_TRACE();
3238 }
3239
3240 static int
3241 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3242 {
3243         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3244         struct i40e_vsi *vsi = pf->main_vsi;
3245         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3246         struct i40e_vsi_vlan_pvid_info info;
3247
3248         memset(&info, 0, sizeof(info));
3249         info.on = on;
        if (info.on) {
                info.config.pvid = pvid;
        } else {
                info.config.reject.tagged =
                                data->dev_conf.txmode.hw_vlan_reject_tagged;
                info.config.reject.untagged =
                                data->dev_conf.txmode.hw_vlan_reject_untagged;
        }
3258
3259         return i40e_vsi_vlan_pvid_set(vsi, &info);
3260 }
3261
3262 static int
3263 i40e_dev_led_on(struct rte_eth_dev *dev)
3264 {
3265         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3266         uint32_t mode = i40e_led_get(hw);
3267
3268         if (mode == 0)
                i40e_led_set(hw, 0xf, true); /* 0xf means led always on */
3270
3271         return 0;
3272 }
3273
3274 static int
3275 i40e_dev_led_off(struct rte_eth_dev *dev)
3276 {
3277         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3278         uint32_t mode = i40e_led_get(hw);
3279
3280         if (mode != 0)
3281                 i40e_led_set(hw, 0, false);
3282
3283         return 0;
3284 }
3285
3286 static int
3287 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3288 {
3289         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3290         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3291
3292         fc_conf->pause_time = pf->fc_conf.pause_time;
3293
        /* Read back from the registers, in case the values were modified
         * by another port.
         */
3295         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
3296                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
3297         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
3298                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
3299
3300         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3301         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3302
        /* Return the current mode according to the actual setting */
3304         switch (hw->fc.current_mode) {
3305         case I40E_FC_FULL:
3306                 fc_conf->mode = RTE_FC_FULL;
3307                 break;
3308         case I40E_FC_TX_PAUSE:
3309                 fc_conf->mode = RTE_FC_TX_PAUSE;
3310                 break;
3311         case I40E_FC_RX_PAUSE:
3312                 fc_conf->mode = RTE_FC_RX_PAUSE;
3313                 break;
3314         case I40E_FC_NONE:
3315         default:
3316                 fc_conf->mode = RTE_FC_NONE;
        }
3318
3319         return 0;
3320 }
3321
3322 static int
3323 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3324 {
3325         uint32_t mflcn_reg, fctrl_reg, reg;
3326         uint32_t max_high_water;
3327         uint8_t i, aq_failure;
3328         int err;
3329         struct i40e_hw *hw;
3330         struct i40e_pf *pf;
3331         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3332                 [RTE_FC_NONE] = I40E_FC_NONE,
3333                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3334                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3335                 [RTE_FC_FULL] = I40E_FC_FULL
3336         };
3337
        /* The high_water field in rte_eth_fc_conf is in kilobytes */
3339
3340         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3341         if ((fc_conf->high_water > max_high_water) ||
3342                         (fc_conf->high_water < fc_conf->low_water)) {
                PMD_INIT_LOG(ERR,
                        "Invalid high/low water setup value in KB: high_water must be <= %d and >= low_water.",
                        max_high_water);
3346                 return -EINVAL;
3347         }
3348
3349         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3350         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3351         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3352
3353         pf->fc_conf.pause_time = fc_conf->pause_time;
3354         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3355         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3356
3357         PMD_INIT_FUNC_TRACE();
3358
        /* All the link-flow-control-related enable/disable register
         * configuration is handled by the firmware.
         */
3362         err = i40e_set_fc(hw, &aq_failure, true);
3363         if (err < 0)
3364                 return -ENOSYS;
3365
3366         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3367                 /* Configure flow control refresh threshold,
3368                  * the value for stat_tx_pause_refresh_timer[8]
3369                  * is used for global pause operation.
3370                  */
3371
3372                 I40E_WRITE_REG(hw,
3373                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3374                                pf->fc_conf.pause_time);
3375
3376                 /* configure the timer value included in transmitted pause
3377                  * frame,
3378                  * the value for stat_tx_pause_quanta[8] is used for global
3379                  * pause operation
3380                  */
3381                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3382                                pf->fc_conf.pause_time);
3383
3384                 fctrl_reg = I40E_READ_REG(hw,
3385                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3386
3387                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3388                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3389                 else
3390                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3391
3392                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3393                                fctrl_reg);
3394         } else {
3395                 /* Configure pause time (2 TCs per register) */
3396                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3397                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3398                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3399
3400                 /* Configure flow control refresh threshold value */
3401                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3402                                pf->fc_conf.pause_time / 2);
3403
3404                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3405
                /* Set or clear the MFLCN.PMCF and MFLCN.DPF bits
                 * depending on the configuration.
                 */
3409                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
3410                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3411                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3412                 } else {
3413                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3414                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3415                 }
3416
3417                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3418         }
3419
        /* Configure the water marks both in packets and in bytes:
         * GLRPB_PHW/PLW take the threshold in packets (the byte value
         * divided by the assumed average packet size), while GLRPB_GHW/GLW
         * take it in bytes (the KB value shifted left by I40E_KILOSHIFT).
         */
3421         I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
3422                        (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3423                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3424         I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
3425                        (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3426                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3427         I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
3428                        pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3429                        << I40E_KILOSHIFT);
3430         I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
3431                        pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3432                        << I40E_KILOSHIFT);
3433
3434         I40E_WRITE_FLUSH(hw);
3435
3436         return 0;
3437 }
3438
3439 static int
3440 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3441                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3442 {
3443         PMD_INIT_FUNC_TRACE();
3444
3445         return -ENOSYS;
3446 }
3447
3448 /* Add a MAC address, and update filters */
3449 static int
3450 i40e_macaddr_add(struct rte_eth_dev *dev,
3451                  struct ether_addr *mac_addr,
3452                  __rte_unused uint32_t index,
3453                  uint32_t pool)
3454 {
3455         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3456         struct i40e_mac_filter_info mac_filter;
3457         struct i40e_vsi *vsi;
3458         int ret;
3459
3460         /* If VMDQ not enabled or configured, return */
3461         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
3462                           !pf->nb_cfg_vmdq_vsi)) {
3463                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
3464                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
3465                         pool);
3466                 return -ENOTSUP;
3467         }
3468
3469         if (pool > pf->nb_cfg_vmdq_vsi) {
3470                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3471                                 pool, pf->nb_cfg_vmdq_vsi);
3472                 return -EINVAL;
3473         }
3474
3475         rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3476         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3477                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3478         else
3479                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3480
3481         if (pool == 0)
3482                 vsi = pf->main_vsi;
3483         else
3484                 vsi = pf->vmdq[pool - 1].vsi;
3485
3486         ret = i40e_vsi_add_mac(vsi, &mac_filter);
3487         if (ret != I40E_SUCCESS) {
3488                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3489                 return -ENODEV;
3490         }
3491         return 0;
3492 }
3493
3494 /* Remove a MAC address, and update filters */
3495 static void
3496 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3497 {
3498         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3499         struct i40e_vsi *vsi;
3500         struct rte_eth_dev_data *data = dev->data;
3501         struct ether_addr *macaddr;
3502         int ret;
3503         uint32_t i;
3504         uint64_t pool_sel;
3505
3506         macaddr = &(data->mac_addrs[index]);
3507
3508         pool_sel = dev->data->mac_pool_sel[index];
3509
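        /* mac_pool_sel is a bitmap of the pools this address was added
         * to: bit 0 selects the main VSI, bit i (i > 0) selects VMDQ
         * pool i.
         */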
3510         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3511                 if (pool_sel & (1ULL << i)) {
3512                         if (i == 0)
3513                                 vsi = pf->main_vsi;
3514                         else {
3515                                 /* No VMDQ pool enabled or configured */
3516                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3517                                         (i > pf->nb_cfg_vmdq_vsi)) {
3518                                         PMD_DRV_LOG(ERR,
3519                                                 "No VMDQ pool enabled/configured");
3520                                         return;
3521                                 }
3522                                 vsi = pf->vmdq[i - 1].vsi;
3523                         }
3524                         ret = i40e_vsi_delete_mac(vsi, macaddr);
3525
3526                         if (ret) {
3527                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3528                                 return;
3529                         }
3530                 }
3531         }
3532 }
3533
3534 /* Set perfect match or hash match of MAC and VLAN for a VF */
3535 static int
3536 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3537                  struct rte_eth_mac_filter *filter,
3538                  bool add)
3539 {
3540         struct i40e_hw *hw;
3541         struct i40e_mac_filter_info mac_filter;
3542         struct ether_addr old_mac;
3543         struct ether_addr *new_mac;
3544         struct i40e_pf_vf *vf = NULL;
3545         uint16_t vf_id;
3546         int ret;
3547
3548         if (pf == NULL) {
3549                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3550                 return -EINVAL;
3551         }
3552         hw = I40E_PF_TO_HW(pf);
3553
3554         if (filter == NULL) {
3555                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3556                 return -EINVAL;
3557         }
3558
3559         new_mac = &filter->mac_addr;
3560
3561         if (is_zero_ether_addr(new_mac)) {
3562                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3563                 return -EINVAL;
3564         }
3565
3566         vf_id = filter->dst_id;
3567
3568         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
3569                 PMD_DRV_LOG(ERR, "Invalid argument.");
3570                 return -EINVAL;
3571         }
3572         vf = &pf->vfs[vf_id];
3573
3574         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3575                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3576                 return -EINVAL;
3577         }
3578
3579         if (add) {
3580                 rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3581                 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3582                                 ETHER_ADDR_LEN);
3583                 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3584                                  ETHER_ADDR_LEN);
3585
3586                 mac_filter.filter_type = filter->filter_type;
3587                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3588                 if (ret != I40E_SUCCESS) {
3589                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3590                         return -1;
3591                 }
3592                 ether_addr_copy(new_mac, &pf->dev_addr);
3593         } else {
3594                 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3595                                 ETHER_ADDR_LEN);
3596                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3597                 if (ret != I40E_SUCCESS) {
3598                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3599                         return -1;
3600                 }
3601
3602                 /* Clear device address as it has been removed */
3603                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3604                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3605         }
3606
3607         return 0;
3608 }
3609
3610 /* MAC filter handle */
3611 static int
3612 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3613                 void *arg)
3614 {
3615         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3616         struct rte_eth_mac_filter *filter;
3617         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3618         int ret = I40E_NOT_SUPPORTED;
3619
3620         filter = (struct rte_eth_mac_filter *)(arg);
3621
3622         switch (filter_op) {
3623         case RTE_ETH_FILTER_NOP:
3624                 ret = I40E_SUCCESS;
3625                 break;
3626         case RTE_ETH_FILTER_ADD:
3627                 i40e_pf_disable_irq0(hw);
3628                 if (filter->is_vf)
3629                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
3630                 i40e_pf_enable_irq0(hw);
3631                 break;
3632         case RTE_ETH_FILTER_DELETE:
3633                 i40e_pf_disable_irq0(hw);
3634                 if (filter->is_vf)
3635                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
3636                 i40e_pf_enable_irq0(hw);
3637                 break;
3638         default:
3639                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3640                 ret = I40E_ERR_PARAM;
3641                 break;
3642         }
3643
3644         return ret;
3645 }
3646
3647 static int
3648 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3649 {
3650         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3651         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3652         int ret;
3653
3654         if (!lut)
3655                 return -EINVAL;
3656
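        /* Devices with the RSS_AQ_CAPABLE flag (typically the X722
         * family) expose the lookup table through admin queue commands;
         * otherwise it is mapped directly into the PFQF_HLUT registers,
         * four 8-bit LUT entries per 32-bit register.
         */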
3657         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3658                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
3659                                           lut, lut_size);
3660                 if (ret) {
3661                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3662                         return ret;
3663                 }
3664         } else {
3665                 uint32_t *lut_dw = (uint32_t *)lut;
3666                 uint16_t i, lut_size_dw = lut_size / 4;
3667
3668                 for (i = 0; i < lut_size_dw; i++)
3669                         lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
3670         }
3671
3672         return 0;
3673 }
3674
3675 static int
3676 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3677 {
3678         struct i40e_pf *pf;
3679         struct i40e_hw *hw;
3680         int ret;
3681
3682         if (!vsi || !lut)
3683                 return -EINVAL;
3684
3685         pf = I40E_VSI_TO_PF(vsi);
3686         hw = I40E_VSI_TO_HW(vsi);
3687
3688         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3689                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
3690                                           lut, lut_size);
3691                 if (ret) {
3692                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3693                         return ret;
3694                 }
3695         } else {
3696                 uint32_t *lut_dw = (uint32_t *)lut;
3697                 uint16_t i, lut_size_dw = lut_size / 4;
3698
3699                 for (i = 0; i < lut_size_dw; i++)
3700                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
3701                 I40E_WRITE_FLUSH(hw);
3702         }
3703
3704         return 0;
3705 }
3706
3707 static int
3708 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
3709                          struct rte_eth_rss_reta_entry64 *reta_conf,
3710                          uint16_t reta_size)
3711 {
3712         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3713         uint16_t i, lut_size = pf->hash_lut_size;
3714         uint16_t idx, shift;
3715         uint8_t *lut;
3716         int ret;
3717
3718         if (reta_size != lut_size ||
3719                 reta_size > ETH_RSS_RETA_SIZE_512) {
3720                 PMD_DRV_LOG(ERR,
                        "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
3722                         reta_size, lut_size);
3723                 return -EINVAL;
3724         }
3725
3726         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3727         if (!lut) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
3729                 return -ENOMEM;
3730         }
3731         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3732         if (ret)
3733                 goto out;
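        /* reta_conf is an array of 64-entry groups; bit j of a group's
         * mask selects entry (idx * RTE_RETA_GROUP_SIZE + j) for update,
         * so entries left unselected keep the values just read back.
         */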
3734         for (i = 0; i < reta_size; i++) {
3735                 idx = i / RTE_RETA_GROUP_SIZE;
3736                 shift = i % RTE_RETA_GROUP_SIZE;
3737                 if (reta_conf[idx].mask & (1ULL << shift))
3738                         lut[i] = reta_conf[idx].reta[shift];
3739         }
3740         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
3741
3742 out:
3743         rte_free(lut);
3744
3745         return ret;
3746 }
3747
3748 static int
3749 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
3750                         struct rte_eth_rss_reta_entry64 *reta_conf,
3751                         uint16_t reta_size)
3752 {
3753         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3754         uint16_t i, lut_size = pf->hash_lut_size;
3755         uint16_t idx, shift;
3756         uint8_t *lut;
3757         int ret;
3758
3759         if (reta_size != lut_size ||
3760                 reta_size > ETH_RSS_RETA_SIZE_512) {
3761                 PMD_DRV_LOG(ERR,
                        "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
3763                         reta_size, lut_size);
3764                 return -EINVAL;
3765         }
3766
3767         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3768         if (!lut) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
3770                 return -ENOMEM;
3771         }
3772
3773         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3774         if (ret)
3775                 goto out;
3776         for (i = 0; i < reta_size; i++) {
3777                 idx = i / RTE_RETA_GROUP_SIZE;
3778                 shift = i % RTE_RETA_GROUP_SIZE;
3779                 if (reta_conf[idx].mask & (1ULL << shift))
3780                         reta_conf[idx].reta[shift] = lut[i];
3781         }
3782
3783 out:
3784         rte_free(lut);
3785
3786         return ret;
3787 }
3788
3789 /**
3790  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
3791  * @hw:   pointer to the HW structure
3792  * @mem:  pointer to mem struct to fill out
3793  * @size: size of memory requested
3794  * @alignment: what to align the allocation to
3795  **/
3796 enum i40e_status_code
3797 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3798                         struct i40e_dma_mem *mem,
3799                         u64 size,
3800                         u32 alignment)
3801 {
3802         const struct rte_memzone *mz = NULL;
3803         char z_name[RTE_MEMZONE_NAMESIZE];
3804
3805         if (!mem)
3806                 return I40E_ERR_PARAM;
3807
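        /* A random suffix gives each allocation a unique memzone name; the
         * bounded reservation keeps the buffer within a single 2M boundary,
         * presumably so the DMA area stays physically contiguous.
         */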
3808         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
3809         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
3810                                          alignment, RTE_PGSIZE_2M);
3811         if (!mz)
3812                 return I40E_ERR_NO_MEMORY;
3813
3814         mem->size = size;
3815         mem->va = mz->addr;
3816         mem->pa = mz->iova;
3817         mem->zone = (const void *)mz;
3818         PMD_DRV_LOG(DEBUG,
3819                 "memzone %s allocated with physical address: %"PRIu64,
3820                 mz->name, mem->pa);
3821
3822         return I40E_SUCCESS;
3823 }
3824
3825 /**
3826  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
3827  * @hw:   pointer to the HW structure
3828  * @mem:  ptr to mem struct to free
3829  **/
3830 enum i40e_status_code
3831 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3832                     struct i40e_dma_mem *mem)
3833 {
3834         if (!mem)
3835                 return I40E_ERR_PARAM;
3836
3837         PMD_DRV_LOG(DEBUG,
3838                 "memzone %s to be freed with physical address: %"PRIu64,
3839                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
3840         rte_memzone_free((const struct rte_memzone *)mem->zone);
3841         mem->zone = NULL;
3842         mem->va = NULL;
3843         mem->pa = (u64)0;
3844
3845         return I40E_SUCCESS;
3846 }
3847
3848 /**
3849  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
3850  * @hw:   pointer to the HW structure
3851  * @mem:  pointer to mem struct to fill out
3852  * @size: size of memory requested
3853  **/
3854 enum i40e_status_code
3855 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3856                          struct i40e_virt_mem *mem,
3857                          u32 size)
3858 {
3859         if (!mem)
3860                 return I40E_ERR_PARAM;
3861
3862         mem->size = size;
3863         mem->va = rte_zmalloc("i40e", size, 0);
3864
3865         if (mem->va)
3866                 return I40E_SUCCESS;
3867         else
3868                 return I40E_ERR_NO_MEMORY;
3869 }
3870
3871 /**
3872  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
3873  * @hw:   pointer to the HW structure
3874  * @mem:  pointer to mem struct to free
3875  **/
3876 enum i40e_status_code
3877 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3878                      struct i40e_virt_mem *mem)
3879 {
3880         if (!mem)
3881                 return I40E_ERR_PARAM;
3882
3883         rte_free(mem->va);
3884         mem->va = NULL;
3885
3886         return I40E_SUCCESS;
3887 }
3888
3889 void
3890 i40e_init_spinlock_d(struct i40e_spinlock *sp)
3891 {
3892         rte_spinlock_init(&sp->spinlock);
3893 }
3894
3895 void
3896 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
3897 {
3898         rte_spinlock_lock(&sp->spinlock);
3899 }
3900
3901 void
3902 i40e_release_spinlock_d(struct i40e_spinlock *sp)
3903 {
3904         rte_spinlock_unlock(&sp->spinlock);
3905 }
3906
3907 void
3908 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
3909 {
3910         return;
3911 }
3912
3913 /**
3914  * Get the hardware capabilities, which will be parsed
3915  * and saved into struct i40e_hw.
3916  */
3917 static int
3918 i40e_get_cap(struct i40e_hw *hw)
3919 {
3920         struct i40e_aqc_list_capabilities_element_resp *buf;
3921         uint16_t len, size = 0;
3922         int ret;
3923
3924         /* Calculate a buffer large enough to hold the response data temporarily */
3925         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
3926                                                 I40E_MAX_CAP_ELE_NUM;
3927         buf = rte_zmalloc("i40e", len, 0);
3928         if (!buf) {
3929                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3930                 return I40E_ERR_NO_MEMORY;
3931         }
3932
3933         /* Get and parse the capabilities, then save them to hw */
3934         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
3935                         i40e_aqc_opc_list_func_capabilities, NULL);
3936         if (ret != I40E_SUCCESS)
3937                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
3938
3939         /* Free the temporary buffer after being used */
3940         rte_free(buf);
3941
3942         return ret;
3943 }
3944
3945 static int
3946 i40e_pf_parameter_init(struct rte_eth_dev *dev)
3947 {
3948         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3949         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3950         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3951         uint16_t qp_count = 0, vsi_count = 0;
3952
3953         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
3954                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
3955                 return -EINVAL;
3956         }
3957         /* Initialize the link flow control (LFC) parameters */
3958         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
3959         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
3960         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
3961
3962         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
3963         pf->max_num_vsi = hw->func_caps.num_vsis;
3964         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
3965         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
3966         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3967
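        /* Carve the PF's contiguous queue space into consecutive ranges:
         * [ FDIR | LAN | VF0..VFn | VMDq0..VMDqm ], accumulating qp_count
         * and vsi_count to check against the hardware limits below.
         */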
3968         /* FDir queue/VSI allocation */
3969         pf->fdir_qp_offset = 0;
3970         if (hw->func_caps.fd) {
3971                 pf->flags |= I40E_FLAG_FDIR;
3972                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
3973         } else {
3974                 pf->fdir_nb_qps = 0;
3975         }
3976         qp_count += pf->fdir_nb_qps;
3977         vsi_count += 1;
3978
3979         /* LAN queue/VSI allocation */
3980         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
3981         if (!hw->func_caps.rss) {
3982                 pf->lan_nb_qps = 1;
3983         } else {
3984                 pf->flags |= I40E_FLAG_RSS;
3985                 if (hw->mac.type == I40E_MAC_X722)
3986                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
3987                 pf->lan_nb_qps = pf->lan_nb_qp_max;
3988         }
3989         qp_count += pf->lan_nb_qps;
3990         vsi_count += 1;
3991
3992         /* VF queue/VSI allocation */
3993         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
3994         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
3995                 pf->flags |= I40E_FLAG_SRIOV;
3996                 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3997                 pf->vf_num = pci_dev->max_vfs;
3998                 PMD_DRV_LOG(DEBUG,
3999                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4000                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4001         } else {
4002                 pf->vf_nb_qps = 0;
4003                 pf->vf_num = 0;
4004         }
4005         qp_count += pf->vf_nb_qps * pf->vf_num;
4006         vsi_count += pf->vf_num;
4007
4008         /* VMDq queue/VSI allocation */
4009         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4010         pf->vmdq_nb_qps = 0;
4011         pf->max_nb_vmdq_vsi = 0;
4012         if (hw->func_caps.vmdq) {
4013                 if (qp_count < hw->func_caps.num_tx_qp &&
4014                         vsi_count < hw->func_caps.num_vsis) {
4015                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4016                                 qp_count) / pf->vmdq_nb_qp_max;
4017
4018                         /* Limit the maximum number of VMDq vsi to the maximum
4019                          * ethdev can support
4020                          */
4021                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4022                                 hw->func_caps.num_vsis - vsi_count);
4023                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4024                                 ETH_64_POOLS);
4025                         if (pf->max_nb_vmdq_vsi) {
4026                                 pf->flags |= I40E_FLAG_VMDQ;
4027                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4028                                 PMD_DRV_LOG(DEBUG,
4029                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4030                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4031                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4032                         } else {
4033                                 PMD_DRV_LOG(INFO,
4034                                         "Not enough queues left for VMDq");
4035                         }
4036                 } else {
4037                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4038                 }
4039         }
4040         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4041         vsi_count += pf->max_nb_vmdq_vsi;
4042
4043         if (hw->func_caps.dcb)
4044                 pf->flags |= I40E_FLAG_DCB;
4045
4046         if (qp_count > hw->func_caps.num_tx_qp) {
4047                 PMD_DRV_LOG(ERR,
4048                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4049                         qp_count, hw->func_caps.num_tx_qp);
4050                 return -EINVAL;
4051         }
4052         if (vsi_count > hw->func_caps.num_vsis) {
4053                 PMD_DRV_LOG(ERR,
4054                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4055                         vsi_count, hw->func_caps.num_vsis);
4056                 return -EINVAL;
4057         }
4058
4059         return 0;
4060 }
4061
4062 static int
4063 i40e_pf_get_switch_config(struct i40e_pf *pf)
4064 {
4065         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4066         struct i40e_aqc_get_switch_config_resp *switch_config;
4067         struct i40e_aqc_switch_config_element_resp *element;
4068         uint16_t start_seid = 0, num_reported;
4069         int ret;
4070
4071         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4072                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4073         if (!switch_config) {
4074                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4075                 return -ENOMEM;
4076         }
4077
4078         /* Get the switch configurations */
4079         ret = i40e_aq_get_switch_config(hw, switch_config,
4080                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4081         if (ret != I40E_SUCCESS) {
4082                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4083                 goto fail;
4084         }
4085         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4086         if (num_reported != 1) { /* The number should be 1 */
4087                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4088                 goto fail;
4089         }
4090
4091         /* Parse the switch configuration elements */
4092         element = &(switch_config->element[0]);
4093         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4094                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4095                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4096         } else
4097                 PMD_DRV_LOG(INFO, "Unknown element type");
4098
4099 fail:
4100         rte_free(switch_config);
4101
4102         return ret;
4103 }
4104
4105 static int
4106 i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
4107                         uint32_t num)
4108 {
4109         struct pool_entry *entry;
4110
4111         if (pool == NULL || num == 0)
4112                 return -EINVAL;
4113
4114         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4115         if (entry == NULL) {
4116                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4117                 return -ENOMEM;
4118         }
4119
4120         /* Initialize the queue heap */
4121         pool->num_free = num;
4122         pool->num_alloc = 0;
4123         pool->base = base;
4124         LIST_INIT(&pool->alloc_list);
4125         LIST_INIT(&pool->free_list);
4126
4127         /* Initialize the element */
4128         entry->base = 0;
4129         entry->len = num;
4130
4131         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4132         return 0;
4133 }
4134
4135 static void
4136 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4137 {
4138         struct pool_entry *entry, *next_entry;
4139
4140         if (pool == NULL)
4141                 return;
4142
4143         for (entry = LIST_FIRST(&pool->alloc_list);
4144                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4145                         entry = next_entry) {
4146                 LIST_REMOVE(entry, next);
4147                 rte_free(entry);
4148         }
4149
4150         for (entry = LIST_FIRST(&pool->free_list);
4151                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4152                         entry = next_entry) {
4153                 LIST_REMOVE(entry, next);
4154                 rte_free(entry);
4155         }
4156
4157         pool->num_free = 0;
4158         pool->num_alloc = 0;
4159         pool->base = 0;
4160         LIST_INIT(&pool->alloc_list);
4161         LIST_INIT(&pool->free_list);
4162 }
4163
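/**
 * Return a previously allocated range (identified by its absolute base) to
 * the pool's free list, merging it with adjacent free ranges so the free
 * list stays sorted and coalesced. For example, freeing [8, 16) between the
 * free ranges [0, 8) and [16, 32) collapses all three into [0, 32).
 */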
4164 static int
4165 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4166                        uint32_t base)
4167 {
4168         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4169         uint32_t pool_offset;
4170         int insert;
4171
4172         if (pool == NULL) {
4173                 PMD_DRV_LOG(ERR, "Invalid parameter");
4174                 return -EINVAL;
4175         }
4176
4177         pool_offset = base - pool->base;
4178         /* Lookup in alloc list */
4179         LIST_FOREACH(entry, &pool->alloc_list, next) {
4180                 if (entry->base == pool_offset) {
4181                         valid_entry = entry;
4182                         LIST_REMOVE(entry, next);
4183                         break;
4184                 }
4185         }
4186
4187         /* Not found, return */
4188         if (valid_entry == NULL) {
4189                 PMD_DRV_LOG(ERR, "Failed to find entry");
4190                 return -EINVAL;
4191         }
4192
4193         /**
4194          * Found it; move it to the free list and try to merge.
4195          * To make merging easier, the free list is always sorted by base.
4196          * Find the adjacent prev and next entries.
4197          */
4198         prev = next = NULL;
4199         LIST_FOREACH(entry, &pool->free_list, next) {
4200                 if (entry->base > valid_entry->base) {
4201                         next = entry;
4202                         break;
4203                 }
4204                 prev = entry;
4205         }
4206
4207         insert = 0;
4208         /* Try to merge with the next one */
4209         if (next != NULL) {
4210                 /* Merge with next one */
4211                 if (valid_entry->base + valid_entry->len == next->base) {
4212                         next->base = valid_entry->base;
4213                         next->len += valid_entry->len;
4214                         rte_free(valid_entry);
4215                         valid_entry = next;
4216                         insert = 1;
4217                 }
4218         }
4219
4220         if (prev != NULL) {
4221                 /* Merge with previous one */
4222                 if (prev->base + prev->len == valid_entry->base) {
4223                         prev->len += valid_entry->len;
4224                         /* If it was merged with the next one, remove that node */
4225                         if (insert == 1) {
4226                                 LIST_REMOVE(valid_entry, next);
4227                                 rte_free(valid_entry);
4228                         } else {
4229                                 rte_free(valid_entry);
4230                                 insert = 1;
4231                         }
4232                 }
4233         }
4234
4235         /* No entry was merged, insert it */
4236         if (insert == 0) {
4237                 if (prev != NULL)
4238                         LIST_INSERT_AFTER(prev, valid_entry, next);
4239                 else if (next != NULL)
4240                         LIST_INSERT_BEFORE(next, valid_entry, next);
4241                 else /* It's empty list, insert to head */
4242                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4243         }
4244
4245         pool->num_free += valid_entry->len;
4246         pool->num_alloc -= valid_entry->len;
4247
4248         return 0;
4249 }
4250
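/**
 * Allocate 'num' contiguous entries from the pool with a best-fit search
 * over the free list: an exact-length range is taken whole, otherwise the
 * smallest range that fits is split and its front part moved to the alloc
 * list. Returns the absolute base of the range on success.
 */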
4251 static int
4252 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4253                        uint16_t num)
4254 {
4255         struct pool_entry *entry, *valid_entry;
4256
4257         if (pool == NULL || num == 0) {
4258                 PMD_DRV_LOG(ERR, "Invalid parameter");
4259                 return -EINVAL;
4260         }
4261
4262         if (pool->num_free < num) {
4263                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
4264                             num, pool->num_free);
4265                 return -ENOMEM;
4266         }
4267
4268         valid_entry = NULL;
4269         /* Look up the free list and find the best-fit entry */
4270         LIST_FOREACH(entry, &pool->free_list, next) {
4271                 if (entry->len >= num) {
4272                         /* Find best one */
4273                         if (entry->len == num) {
4274                                 valid_entry = entry;
4275                                 break;
4276                         }
4277                         if (valid_entry == NULL || valid_entry->len > entry->len)
4278                                 valid_entry = entry;
4279                 }
4280         }
4281
4282         /* No entry found to satisfy the request, return */
4283         if (valid_entry == NULL) {
4284                 PMD_DRV_LOG(ERR, "No valid entry found");
4285                 return -ENOMEM;
4286         }
4287         /**
4288          * The entry has exactly the requested number of queues;
4289          * remove it from the free list.
4290          */
4291         if (valid_entry->len == num) {
4292                 LIST_REMOVE(valid_entry, next);
4293         } else {
4294                 /**
4295                  * The entry has more queues than requested;
4296                  * create a new entry for the alloc list and shrink
4297                  * the base and length of the one left in the free list.
4298                  */
4299                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4300                 if (entry == NULL) {
4301                         PMD_DRV_LOG(ERR,
4302                                 "Failed to allocate memory for resource pool");
4303                         return -ENOMEM;
4304                 }
4305                 entry->base = valid_entry->base;
4306                 entry->len = num;
4307                 valid_entry->base += num;
4308                 valid_entry->len -= num;
4309                 valid_entry = entry;
4310         }
4311
4312         /* Insert it into alloc list, not sorted */
4313         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4314
4315         pool->num_free -= valid_entry->len;
4316         pool->num_alloc += valid_entry->len;
4317
4318         return valid_entry->base + pool->base;
4319 }
4320
4321 /**
4322  * bitmap_is_subset - Check whether src2 is subset of src1
4323  **/
4324 static inline int
4325 bitmap_is_subset(uint8_t src1, uint8_t src2)
4326 {
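        /* (src1 ^ src2) & src2 isolates the bits that are set in src2 but
         * clear in src1; if no such bit exists, src2 is a subset of src1.
         */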
4327         return !((src1 ^ src2) & src2);
4328 }
4329
4330 static enum i40e_status_code
4331 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4332 {
4333         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4334
4335         /* If DCB is not supported, only default TC is supported */
4336         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4337                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
4338                 return I40E_NOT_SUPPORTED;
4339         }
4340
4341         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4342                 PMD_DRV_LOG(ERR,
4343                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
4344                         hw->func_caps.enabled_tcmap, enabled_tcmap);
4345                 return I40E_NOT_SUPPORTED;
4346         }
4347         return I40E_SUCCESS;
4348 }
4349
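/**
 * i40e_vsi_vlan_pvid_set - Configure port VLAN insertion for a VSI
 * @vsi: the VSI to be configured
 * @info: carries the on/off flag plus either the PVID to insert or the
 *        tagged/untagged reject settings used when insertion is disabled
 *
 * Returns I40E_SUCCESS on success, an i40e status code on failure.
 */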
4350 int
4351 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4352                                 struct i40e_vsi_vlan_pvid_info *info)
4353 {
4354         struct i40e_hw *hw;
4355         struct i40e_vsi_context ctxt;
4356         uint8_t vlan_flags = 0;
4357         int ret;
4358
4359         if (vsi == NULL || info == NULL) {
4360                 PMD_DRV_LOG(ERR, "invalid parameters");
4361                 return I40E_ERR_PARAM;
4362         }
4363
4364         if (info->on) {
4365                 vsi->info.pvid = info->config.pvid;
4366                 /**
4367                  * If insert pvid is enabled, only tagged pkts are
4368                  * allowed to be sent out.
4369                  */
4370                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
4371                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4372         } else {
4373                 vsi->info.pvid = 0;
4374                 if (info->config.reject.tagged == 0)
4375                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4376
4377                 if (info->config.reject.untagged == 0)
4378                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
4379         }
4380         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
4381                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
4382         vsi->info.port_vlan_flags |= vlan_flags;
4383         vsi->info.valid_sections =
4384                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4385         memset(&ctxt, 0, sizeof(ctxt));
4386         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4387         ctxt.seid = vsi->seid;
4388
4389         hw = I40E_VSI_TO_HW(vsi);
4390         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4391         if (ret != I40E_SUCCESS)
4392                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
4393
4394         return ret;
4395 }
4396
4397 static int
4398 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4399 {
4400         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4401         int i, ret;
4402         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
4403
4404         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4405         if (ret != I40E_SUCCESS)
4406                 return ret;
4407
4408         if (!vsi->seid) {
4409                 PMD_DRV_LOG(ERR, "seid not valid");
4410                 return -EINVAL;
4411         }
4412
4413         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
4414         tc_bw_data.tc_valid_bits = enabled_tcmap;
4415         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4416                 tc_bw_data.tc_bw_credits[i] =
4417                         (enabled_tcmap & (1 << i)) ? 1 : 0;
4418
4419         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
4420         if (ret != I40E_SUCCESS) {
4421                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
4422                 return ret;
4423         }
4424
4425         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
4426                                         sizeof(vsi->info.qs_handle));
4427         return I40E_SUCCESS;
4428 }
4429
4430 static enum i40e_status_code
4431 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
4432                                  struct i40e_aqc_vsi_properties_data *info,
4433                                  uint8_t enabled_tcmap)
4434 {
4435         enum i40e_status_code ret;
4436         int i, total_tc = 0;
4437         uint16_t qpnum_per_tc, bsf, qp_idx;
4438
4439         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4440         if (ret != I40E_SUCCESS)
4441                 return ret;
4442
4443         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4444                 if (enabled_tcmap & (1 << i))
4445                         total_tc++;
4446         if (total_tc == 0)
4447                 total_tc = 1;
4448         vsi->enabled_tc = enabled_tcmap;
4449
4450         /* Number of queues per enabled TC */
4451         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
4452         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
4453         bsf = rte_bsf32(qpnum_per_tc);
4454
4455         /* Adjust the queue number to actual queues that can be applied */
4456         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4457                 vsi->nb_qps = qpnum_per_tc * total_tc;
4458
4459         /**
4460          * Configure TC and queue mapping parameters, for enabled TC,
4461          * allocate qpnum_per_tc queues to this traffic. For disabled TC,
4462          * default queue will serve it.
4463          */
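        /* The hardware encodes each TC's queue count as a power of two:
         * tc_mapping[i] = (first queue << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
         * (log2(queue count) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT).
         * Since qpnum_per_tc is floored to a power of two above, bsf is
         * exactly that log2; e.g. 4 queues from queue 0 for TC0 gives
         * (0 << OFFSET_SHIFT) | (2 << NUMBER_SHIFT).
         */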
4464         qp_idx = 0;
4465         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4466                 if (vsi->enabled_tc & (1 << i)) {
4467                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4468                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4469                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4470                         qp_idx += qpnum_per_tc;
4471                 } else
4472                         info->tc_mapping[i] = 0;
4473         }
4474
4475         /* Associate queue number with VSI */
4476         if (vsi->type == I40E_VSI_SRIOV) {
4477                 info->mapping_flags |=
4478                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4479                 for (i = 0; i < vsi->nb_qps; i++)
4480                         info->queue_mapping[i] =
4481                                 rte_cpu_to_le_16(vsi->base_queue + i);
4482         } else {
4483                 info->mapping_flags |=
4484                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4485                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4486         }
4487         info->valid_sections |=
4488                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4489
4490         return I40E_SUCCESS;
4491 }
4492
4493 static int
4494 i40e_veb_release(struct i40e_veb *veb)
4495 {
4496         struct i40e_vsi *vsi;
4497         struct i40e_hw *hw;
4498
4499         if (veb == NULL)
4500                 return -EINVAL;
4501
4502         if (!TAILQ_EMPTY(&veb->head)) {
4503                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4504                 return -EACCES;
4505         }
4506         /* associate_vsi field is NULL for floating VEB */
4507         if (veb->associate_vsi != NULL) {
4508                 vsi = veb->associate_vsi;
4509                 hw = I40E_VSI_TO_HW(vsi);
4510
4511                 vsi->uplink_seid = veb->uplink_seid;
4512                 vsi->veb = NULL;
4513         } else {
4514                 veb->associate_pf->main_vsi->floating_veb = NULL;
4515                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4516         }
4517
4518         i40e_aq_delete_element(hw, veb->seid, NULL);
4519         rte_free(veb);
4520         return I40E_SUCCESS;
4521 }
4522
4523 /* Setup a veb */
4524 static struct i40e_veb *
4525 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4526 {
4527         struct i40e_veb *veb;
4528         int ret;
4529         struct i40e_hw *hw;
4530
4531         if (pf == NULL) {
4532                 PMD_DRV_LOG(ERR,
4533                             "veb setup failed, associated PF shouldn't be NULL");
4534                 return NULL;
4535         }
4536         hw = I40E_PF_TO_HW(pf);
4537
4538         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4539         if (!veb) {
4540                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4541                 goto fail;
4542         }
4543
4544         veb->associate_vsi = vsi;
4545         veb->associate_pf = pf;
4546         TAILQ_INIT(&veb->head);
4547         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4548
4549         /* Create a floating VEB if vsi is NULL; otherwise attach the VEB to the VSI's uplink */
4550         if (vsi != NULL) {
4551                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4552                                       I40E_DEFAULT_TCMAP, false,
4553                                       &veb->seid, false, NULL);
4554         } else {
4555                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4556                                       true, &veb->seid, false, NULL);
4557         }
4558
4559         if (ret != I40E_SUCCESS) {
4560                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4561                             hw->aq.asq_last_status);
4562                 goto fail;
4563         }
4564         veb->enabled_tc = I40E_DEFAULT_TCMAP;
4565
4566         /* get statistics index */
4567         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4568                                 &veb->stats_idx, NULL, NULL, NULL);
4569         if (ret != I40E_SUCCESS) {
4570                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
4571                             hw->aq.asq_last_status);
4572                 goto fail;
4573         }
4574         /* Get VEB bandwidth, to be implemented */
4575         /* Now associated vsi binding to the VEB, set uplink to this VEB */
4576         if (vsi)
4577                 vsi->uplink_seid = veb->seid;
4578
4579         return veb;
4580 fail:
4581         rte_free(veb);
4582         return NULL;
4583 }
4584
4585 int
4586 i40e_vsi_release(struct i40e_vsi *vsi)
4587 {
4588         struct i40e_pf *pf;
4589         struct i40e_hw *hw;
4590         struct i40e_vsi_list *vsi_list;
4591         void *temp;
4592         int ret;
4593         struct i40e_mac_filter *f;
4594         uint16_t user_param;
4595
4596         if (!vsi)
4597                 return I40E_SUCCESS;
4598
4599         if (!vsi->adapter)
4600                 return -EFAULT;
4601
4602         user_param = vsi->user_param;
4603
4604         pf = I40E_VSI_TO_PF(vsi);
4605         hw = I40E_VSI_TO_HW(vsi);
4606
4607         /* The VSI has children attached; release the children first */
4608         if (vsi->veb) {
4609                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
4610                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4611                                 return -1;
4612                 }
4613                 i40e_veb_release(vsi->veb);
4614         }
4615
4616         if (vsi->floating_veb) {
4617                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
4618                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4619                                 return -1;
4620                 }
4621         }
4622
4623         /* Remove all macvlan filters of the VSI */
4624         i40e_vsi_remove_all_macvlan_filter(vsi);
4625         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
4626                 rte_free(f);
4627
4628         if (vsi->type != I40E_VSI_MAIN &&
4629             ((vsi->type != I40E_VSI_SRIOV) ||
4630             !pf->floating_veb_list[user_param])) {
4631                 /* Remove vsi from parent's sibling list */
4632                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
4633                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
4634                         return I40E_ERR_PARAM;
4635                 }
4636                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
4637                                 &vsi->sib_vsi_list, list);
4638
4639                 /* Remove all switch elements of the VSI */
4640                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4641                 if (ret != I40E_SUCCESS)
4642                         PMD_DRV_LOG(ERR, "Failed to delete element");
4643         }
4644
4645         if ((vsi->type == I40E_VSI_SRIOV) &&
4646             pf->floating_veb_list[user_param]) {
4647                 /* Remove vsi from parent's sibling list */
4648                 if (vsi->parent_vsi == NULL ||
4649                     vsi->parent_vsi->floating_veb == NULL) {
4650                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
4651                         return I40E_ERR_PARAM;
4652                 }
4653                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
4654                              &vsi->sib_vsi_list, list);
4655
4656                 /* Remove all switch elements of the VSI */
4657                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4658                 if (ret != I40E_SUCCESS)
4659                         PMD_DRV_LOG(ERR, "Failed to delete element");
4660         }
4661
4662         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4663
4664         if (vsi->type != I40E_VSI_SRIOV)
4665                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
4666         rte_free(vsi);
4667
4668         return I40E_SUCCESS;
4669 }
4670
4671 static int
4672 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
4673 {
4674         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4675         struct i40e_aqc_remove_macvlan_element_data def_filter;
4676         struct i40e_mac_filter_info filter;
4677         int ret;
4678
4679         if (vsi->type != I40E_VSI_MAIN)
4680                 return I40E_ERR_CONFIG;
4681         memset(&def_filter, 0, sizeof(def_filter));
4682         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
4683                                         ETH_ADDR_LEN);
4684         def_filter.vlan_tag = 0;
4685         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4686                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4687         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
4688         if (ret != I40E_SUCCESS) {
4689                 struct i40e_mac_filter *f;
4690                 struct ether_addr *mac;
4691
4692                 PMD_DRV_LOG(DEBUG,
4693                             "Cannot remove the default macvlan filter");
4694                 /* Add the permanent MAC address to the MAC list instead */
4695                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4696                 if (f == NULL) {
4697                         PMD_DRV_LOG(ERR, "failed to allocate memory");
4698                         return I40E_ERR_NO_MEMORY;
4699                 }
4700                 mac = &f->mac_info.mac_addr;
4701                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
4702                                 ETH_ADDR_LEN);
4703                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4704                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4705                 vsi->mac_num++;
4706
4707                 return ret;
4708         }
4709         rte_memcpy(&filter.mac_addr,
4710                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
4711         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4712         return i40e_vsi_add_mac(vsi, &filter);
4713 }
4714
4715 /*
4716  * i40e_vsi_get_bw_config - Query VSI BW Information
4717  * @vsi: the VSI to be queried
4718  *
4719  * Returns 0 on success, negative value on failure
4720  */
4721 static enum i40e_status_code
4722 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
4723 {
4724         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
4725         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
4726         struct i40e_hw *hw = &vsi->adapter->hw;
4727         i40e_status ret;
4728         int i;
4729         uint32_t bw_max;
4730
4731         memset(&bw_config, 0, sizeof(bw_config));
4732         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4733         if (ret != I40E_SUCCESS) {
4734                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
4735                             hw->aq.asq_last_status);
4736                 return ret;
4737         }
4738
4739         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
4740         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
4741                                         &ets_sla_config, NULL);
4742         if (ret != I40E_SUCCESS) {
4743                 PMD_DRV_LOG(ERR,
4744                         "VSI failed to get TC bandwidth configuration %u",
4745                         hw->aq.asq_last_status);
4746                 return ret;
4747         }
4748
4749         /* store and print out BW info */
4750         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
4751         vsi->bw_info.bw_max = bw_config.max_bw;
4752         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
4753         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
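        /* tc_bw_max is reported as two little-endian 16-bit words; combine
         * them into one 32-bit value holding 4 bits of max credits per TC.
         */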
4754         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
4755                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
4756                      I40E_16_BIT_WIDTH);
4757         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4758                 vsi->bw_info.bw_ets_share_credits[i] =
4759                                 ets_sla_config.share_credits[i];
4760                 vsi->bw_info.bw_ets_credits[i] =
4761                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
4762                 /* 4 bits per TC, 4th bit is reserved */
4763                 vsi->bw_info.bw_ets_max[i] =
4764                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
4765                                   RTE_LEN2MASK(3, uint8_t));
4766                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
4767                             vsi->bw_info.bw_ets_share_credits[i]);
4768                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
4769                             vsi->bw_info.bw_ets_credits[i]);
4770                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
4771                             vsi->bw_info.bw_ets_max[i]);
4772         }
4773
4774         return I40E_SUCCESS;
4775 }
4776
4777 /* i40e_enable_pf_lb
4778  * @pf: pointer to the pf structure
4779  *
4780  * allow loopback on pf
4781  */
4782 static inline void
4783 i40e_enable_pf_lb(struct i40e_pf *pf)
4784 {
4785         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4786         struct i40e_vsi_context ctxt;
4787         int ret;
4788
4789         /* Use the FW API if FW >= v5.0 */
4790         if (hw->aq.fw_maj_ver < 5) {
4791                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
4792                 return;
4793         }
4794
4795         memset(&ctxt, 0, sizeof(ctxt));
4796         ctxt.seid = pf->main_vsi_seid;
4797         ctxt.pf_num = hw->pf_id;
4798         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4799         if (ret) {
4800                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
4801                             ret, hw->aq.asq_last_status);
4802                 return;
4803         }
4804         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4805         ctxt.info.valid_sections =
4806                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4807         ctxt.info.switch_id |=
4808                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4809
4810         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4811         if (ret)
4812                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
4813                             hw->aq.asq_last_status);
4814 }
4815
4816 /* Setup a VSI */
4817 struct i40e_vsi *
4818 i40e_vsi_setup(struct i40e_pf *pf,
4819                enum i40e_vsi_type type,
4820                struct i40e_vsi *uplink_vsi,
4821                uint16_t user_param)
4822 {
4823         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4824         struct i40e_vsi *vsi;
4825         struct i40e_mac_filter_info filter;
4826         int ret;
4827         struct i40e_vsi_context ctxt;
4828         struct ether_addr broadcast =
4829                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
4830
4831         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
4832             uplink_vsi == NULL) {
4833                 PMD_DRV_LOG(ERR,
4834                         "VSI setup failed, VSI link shouldn't be NULL");
4835                 return NULL;
4836         }
4837
4838         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
4839                 PMD_DRV_LOG(ERR,
4840                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
4841                 return NULL;
4842         }
4843
4844         /* Two situations:
4845          * 1. type is not MAIN and the uplink VSI is not NULL:
4846          *    if the uplink VSI has not set up a VEB yet, create one under its veb field.
4847          * 2. type is SRIOV and the uplink is NULL:
4848          *    if the floating VEB is NULL, create one under the floating_veb field.
4849          */
4850
4851         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
4852             uplink_vsi->veb == NULL) {
4853                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
4854
4855                 if (uplink_vsi->veb == NULL) {
4856                         PMD_DRV_LOG(ERR, "VEB setup failed");
4857                         return NULL;
4858                 }
4859                 /* Set ALLOW_LOOPBACK on the PF when the VEB is created */
4860                 i40e_enable_pf_lb(pf);
4861         }
4862
4863         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
4864             pf->main_vsi->floating_veb == NULL) {
4865                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
4866
4867                 if (pf->main_vsi->floating_veb == NULL) {
4868                         PMD_DRV_LOG(ERR, "VEB setup failed");
4869                         return NULL;
4870                 }
4871         }
4872
4873         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
4874         if (!vsi) {
4875                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
4876                 return NULL;
4877         }
4878         TAILQ_INIT(&vsi->mac_list);
4879         vsi->type = type;
4880         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
4881         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
4882         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
4883         vsi->user_param = user_param;
4884         vsi->vlan_anti_spoof_on = 0;
4885         vsi->vlan_filter_on = 0;
4886         /* Allocate queues */
4887         switch (vsi->type) {
4888         case I40E_VSI_MAIN:
4889                 vsi->nb_qps = pf->lan_nb_qps;
4890                 break;
4891         case I40E_VSI_SRIOV:
4892                 vsi->nb_qps = pf->vf_nb_qps;
4893                 break;
4894         case I40E_VSI_VMDQ2:
4895                 vsi->nb_qps = pf->vmdq_nb_qps;
4896                 break;
4897         case I40E_VSI_FDIR:
4898                 vsi->nb_qps = pf->fdir_nb_qps;
4899                 break;
4900         default:
4901                 goto fail_mem;
4902         }
4903         /*
4904          * The filter status descriptor is reported on RX queue 0,
4905          * while the TX queue for FDIR filter programming has no such
4906          * constraint and can be any queue.
4907          * To simplify this, make the FDIR VSI use queue pair 0.
4908          * To guarantee queue pair 0 is used, queue allocation must be
4909          * done before this function is called.
4910          */
4911         if (type != I40E_VSI_FDIR) {
4912                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
4913                 if (ret < 0) {
4914                         PMD_DRV_LOG(ERR, "VSI %d failed to allocate queues: %d",
4915                                     vsi->seid, ret);
4916                         goto fail_mem;
4917                 }
4918                 vsi->base_queue = ret;
4919         } else
4920                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
4921
4922         /* VF has MSIX interrupt in VF range, don't allocate here */
4923         if (type == I40E_VSI_MAIN) {
4924                 ret = i40e_res_pool_alloc(&pf->msix_pool,
4925                                           RTE_MIN(vsi->nb_qps,
4926                                                   RTE_MAX_RXTX_INTR_VEC_ID));
4927                 if (ret < 0) {
4928                         PMD_DRV_LOG(ERR, "Main VSI %d failed to allocate MSIX vectors: %d",
4929                                     vsi->seid, ret);
4930                         goto fail_queue_alloc;
4931                 }
4932                 vsi->msix_intr = ret;
4933                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
4934         } else if (type != I40E_VSI_SRIOV) {
4935                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
4936                 if (ret < 0) {
4937                         PMD_DRV_LOG(ERR, "VSI %d failed to allocate an MSIX vector: %d", vsi->seid, ret);
4938                         goto fail_queue_alloc;
4939                 }
4940                 vsi->msix_intr = ret;
4941                 vsi->nb_msix = 1;
4942         } else {
4943                 vsi->msix_intr = 0;
4944                 vsi->nb_msix = 0;
4945         }
4946
4947         /* Add VSI */
4948         if (type == I40E_VSI_MAIN) {
4949                 /* For main VSI, no need to add since it's default one */
4950                 vsi->uplink_seid = pf->mac_seid;
4951                 vsi->seid = pf->main_vsi_seid;
4952                 /* Bind queues to specific MSIX interrupts */
4953                 /**
4954                  * At least 2 interrupts are needed: one for misc causes,
4955                  * which is enabled from the OS side, and another for the
4956                  * queues, bound to the interrupt from the device side only.
4957                  */
4958
4959                 /* Get default VSI parameters from hardware */
4960                 memset(&ctxt, 0, sizeof(ctxt));
4961                 ctxt.seid = vsi->seid;
4962                 ctxt.pf_num = hw->pf_id;
4963                 ctxt.uplink_seid = vsi->uplink_seid;
4964                 ctxt.vf_num = 0;
4965                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4966                 if (ret != I40E_SUCCESS) {
4967                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
4968                         goto fail_msix_alloc;
4969                 }
4970                 rte_memcpy(&vsi->info, &ctxt.info,
4971                         sizeof(struct i40e_aqc_vsi_properties_data));
4972                 vsi->vsi_id = ctxt.vsi_number;
4973                 vsi->info.valid_sections = 0;
4974
4975                 /* Configure tc, enabled TC0 only */
4976                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
4977                         I40E_SUCCESS) {
4978                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
4979                         goto fail_msix_alloc;
4980                 }
4981
4982                 /* TC, queue mapping */
4983                 memset(&ctxt, 0, sizeof(ctxt));
4984                 vsi->info.valid_sections |=
4985                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4986                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
4987                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
4988                 rte_memcpy(&ctxt.info, &vsi->info,
4989                         sizeof(struct i40e_aqc_vsi_properties_data));
4990                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4991                                                 I40E_DEFAULT_TCMAP);
4992                 if (ret != I40E_SUCCESS) {
4993                         PMD_DRV_LOG(ERR,
4994                                 "Failed to configure TC queue mapping");
4995                         goto fail_msix_alloc;
4996                 }
4997                 ctxt.seid = vsi->seid;
4998                 ctxt.pf_num = hw->pf_id;
4999                 ctxt.uplink_seid = vsi->uplink_seid;
5000                 ctxt.vf_num = 0;
5001
5002                 /* Update VSI parameters */
5003                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5004                 if (ret != I40E_SUCCESS) {
5005                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5006                         goto fail_msix_alloc;
5007                 }
5008
5009                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5010                                                 sizeof(vsi->info.tc_mapping));
5011                 rte_memcpy(&vsi->info.queue_mapping,
5012                                 &ctxt.info.queue_mapping,
5013                         sizeof(vsi->info.queue_mapping));
5014                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5015                 vsi->info.valid_sections = 0;
5016
5017                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5018                                 ETH_ADDR_LEN);
5019
5020                 /**
5021                  * Updating the default filter settings is necessary to
5022                  * prevent reception of tagged packets.
5023                  * Some old firmware configurations load a default macvlan
5024                  * filter which accepts both tagged and untagged packets.
5025                  * The update replaces it with a normal filter if needed.
5026                  * For NVM 4.2.2 or later, the update is not needed anymore.
5027                  * Firmware with a correct configuration loads the default
5028                  * macvlan filter, which is expected and cannot be removed.
5029                  */
5030                 i40e_update_default_filter_setting(vsi);
5031                 i40e_config_qinq(hw, vsi);
5032         } else if (type == I40E_VSI_SRIOV) {
5033                 memset(&ctxt, 0, sizeof(ctxt));
5034                 /**
5035                  * For other VSIs, the uplink_seid equals the uplink VSI's
5036                  * uplink_seid since they share the same VEB.
5037                  */
5038                 if (uplink_vsi == NULL)
5039                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5040                 else
5041                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5042                 ctxt.pf_num = hw->pf_id;
5043                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5044                 ctxt.uplink_seid = vsi->uplink_seid;
5045                 ctxt.connection_type = 0x1;
5046                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5047
5048                 /* Use the VEB configuration if FW >= v5.0 */
5049                 if (hw->aq.fw_maj_ver >= 5) {
5050                         /* Configure switch ID */
5051                         ctxt.info.valid_sections |=
5052                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5053                         ctxt.info.switch_id =
5054                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5055                 }
5056
5057                 /* Configure port/vlan */
5058                 ctxt.info.valid_sections |=
5059                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5060                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5061                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5062                                                 hw->func_caps.enabled_tcmap);
5063                 if (ret != I40E_SUCCESS) {
5064                         PMD_DRV_LOG(ERR,
5065                                 "Failed to configure TC queue mapping");
5066                         goto fail_msix_alloc;
5067                 }
5068
5069                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5070                 ctxt.info.valid_sections |=
5071                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5072                 /**
5073                  * Since the VSI is not created yet, only configure the
5074                  * parameters; the VSI itself is added below.
5075                  */
5076
5077                 i40e_config_qinq(hw, vsi);
5078         } else if (type == I40E_VSI_VMDQ2) {
5079                 memset(&ctxt, 0, sizeof(ctxt));
5080                 /*
5081                  * For other VSIs, the uplink_seid equals the uplink VSI's
5082                  * uplink_seid since they share the same VEB.
5083                  */
5084                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5085                 ctxt.pf_num = hw->pf_id;
5086                 ctxt.vf_num = 0;
5087                 ctxt.uplink_seid = vsi->uplink_seid;
5088                 ctxt.connection_type = 0x1;
5089                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5090
5091                 ctxt.info.valid_sections |=
5092                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5093                 /* user_param carries a flag to enable loopback */
5094                 if (user_param) {
5095                         ctxt.info.switch_id =
5096                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5097                         ctxt.info.switch_id |=
5098                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5099                 }
5100
5101                 /* Configure port/vlan */
5102                 ctxt.info.valid_sections |=
5103                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5104                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5105                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5106                                                 I40E_DEFAULT_TCMAP);
5107                 if (ret != I40E_SUCCESS) {
5108                         PMD_DRV_LOG(ERR,
5109                                 "Failed to configure TC queue mapping");
5110                         goto fail_msix_alloc;
5111                 }
5112                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5113                 ctxt.info.valid_sections |=
5114                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5115         } else if (type == I40E_VSI_FDIR) {
5116                 memset(&ctxt, 0, sizeof(ctxt));
5117                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5118                 ctxt.pf_num = hw->pf_id;
5119                 ctxt.vf_num = 0;
5120                 ctxt.uplink_seid = vsi->uplink_seid;
5121                 ctxt.connection_type = 0x1;     /* regular data port */
5122                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5123                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5124                                                 I40E_DEFAULT_TCMAP);
5125                 if (ret != I40E_SUCCESS) {
5126                         PMD_DRV_LOG(ERR,
5127                                 "Failed to configure TC queue mapping.");
5128                         goto fail_msix_alloc;
5129                 }
5130                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5131                 ctxt.info.valid_sections |=
5132                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5133         } else {
5134                 PMD_DRV_LOG(ERR, "VSI: this VSI type is not supported yet");
5135                 goto fail_msix_alloc;
5136         }
5137
5138         if (vsi->type != I40E_VSI_MAIN) {
5139                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5140                 if (ret != I40E_SUCCESS) {
5141                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5142                                     hw->aq.asq_last_status);
5143                         goto fail_msix_alloc;
5144                 }
5145                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5146                 vsi->info.valid_sections = 0;
5147                 vsi->seid = ctxt.seid;
5148                 vsi->vsi_id = ctxt.vsi_number;
5149                 vsi->sib_vsi_list.vsi = vsi;
5150                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5151                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5152                                           &vsi->sib_vsi_list, list);
5153                 } else {
5154                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5155                                           &vsi->sib_vsi_list, list);
5156                 }
5157         }
5158
5159         /* MAC/VLAN configuration */
5160         rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5161         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5162
5163         ret = i40e_vsi_add_mac(vsi, &filter);
5164         if (ret != I40E_SUCCESS) {
5165                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5166                 goto fail_msix_alloc;
5167         }
5168
5169         /* Get VSI BW information */
5170         i40e_vsi_get_bw_config(vsi);
5171         return vsi;
5172 fail_msix_alloc:
5173         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5174 fail_queue_alloc:
5175         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5176 fail_mem:
5177         rte_free(vsi);
5178         return NULL;
5179 }
5180
5181 /* Configure vlan filter on or off */
5182 int
5183 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5184 {
5185         int i, num;
5186         struct i40e_mac_filter *f;
5187         void *temp;
5188         struct i40e_mac_filter_info *mac_filter;
5189         enum rte_mac_filter_type desired_filter;
5190         int ret = I40E_SUCCESS;
5191
5192         if (on) {
5193                 /* Filter to match MAC and VLAN */
5194                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5195         } else {
5196                 /* Filter to match only MAC */
5197                 desired_filter = RTE_MAC_PERFECT_MATCH;
5198         }
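        /*
         * There is no direct per-VSI toggle for vlan filtering in hardware;
         * instead, remove every MAC filter and re-add it with the desired
         * match type (MAC+VLAN vs. MAC only).
         */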
5199
5200         num = vsi->mac_num;
5201
5202         mac_filter = rte_zmalloc("mac_filter_info_data",
5203                                  num * sizeof(*mac_filter), 0);
5204         if (mac_filter == NULL) {
5205                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5206                 return I40E_ERR_NO_MEMORY;
5207         }
5208
5209         i = 0;
5210
5211         /* Remove all existing MAC filters, saving them for re-add */
5212         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5213                 mac_filter[i] = f->mac_info;
5214                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5215                 if (ret) {
5216                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5217                                     on ? "enable" : "disable");
5218                         goto DONE;
5219                 }
5220                 i++;
5221         }
5222
5223         /* Re-add the saved filters with the new filter type */
5224         for (i = 0; i < num; i++) {
5225                 mac_filter[i].filter_type = desired_filter;
5226                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5227                 if (ret) {
5228                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5229                                     on ? "enable" : "disable");
5230                         goto DONE;
5231                 }
5232         }
5233
5234 DONE:
5235         rte_free(mac_filter);
5236         return ret;
5237 }
5238
5239 /* Configure vlan stripping on or off */
5240 int
5241 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5242 {
5243         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5244         struct i40e_vsi_context ctxt;
5245         uint8_t vlan_flags;
5246         int ret = I40E_SUCCESS;
5247
5248         /* Check if it is already on or off */
5249         if (vsi->info.valid_sections &
5250                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5251                 if (on) {
5252                         if ((vsi->info.port_vlan_flags &
5253                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5254                                 return 0; /* already on */
5255                 } else {
5256                         if ((vsi->info.port_vlan_flags &
5257                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5258                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
5259                                 return 0; /* already off */
5260                 }
5261         }
5262
5263         if (on)
5264                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5265         else
5266                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5267         vsi->info.valid_sections =
5268                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5269         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5270         vsi->info.port_vlan_flags |= vlan_flags;
5271         ctxt.seid = vsi->seid;
5272         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5273         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5274         if (ret)
5275                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
5276                             on ? "enable" : "disable");
5277
5278         return ret;
5279 }
5280
5281 static int
5282 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5283 {
5284         struct rte_eth_dev_data *data = dev->data;
5285         int ret;
5286         int mask = 0;
5287
5288         /* Apply vlan offload setting */
5289         mask = ETH_VLAN_STRIP_MASK |
5290                ETH_VLAN_FILTER_MASK |
5291                ETH_VLAN_EXTEND_MASK;
5292         ret = i40e_vlan_offload_set(dev, mask);
5293         if (ret) {
5294                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
5295                 return ret;
5296         }
5297
5298         /* Apply pvid setting */
5299         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5300                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
5301         if (ret)
5302                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
5303
5304         return ret;
5305 }
5306
5307 static int
5308 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5309 {
5310         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5311
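        /*
         * Per the base driver's i40e_aq_set_port_parameters() signature, the
         * arguments are: don't save bad packets (0), pad short packets (1),
         * and 'on' toggling double VLAN (QinQ) on the port.
         */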
5312         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5313 }
5314
5315 static int
5316 i40e_update_flow_control(struct i40e_hw *hw)
5317 {
5318 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5319         struct i40e_link_status link_status;
5320         uint32_t rxfc = 0, txfc = 0, reg;
5321         uint8_t an_info;
5322         int ret;
5323
5324         memset(&link_status, 0, sizeof(link_status));
5325         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5326         if (ret != I40E_SUCCESS) {
5327                 PMD_DRV_LOG(ERR, "Failed to get link status information");
5328                 goto write_reg; /* Disable flow control */
5329         }
5330
5331         an_info = hw->phy.link_info.an_info;
5332         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5333                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5334                 ret = I40E_ERR_NOT_READY;
5335                 goto write_reg; /* Disable flow control */
5336         }
5337         /**
5338          * If link auto-negotiation is enabled, flow control needs to
5339          * be configured according to the negotiated result.
5340          */
5341         switch (an_info & I40E_LINK_PAUSE_RXTX) {
5342         case I40E_LINK_PAUSE_RXTX:
5343                 rxfc = 1;
5344                 txfc = 1;
5345                 hw->fc.current_mode = I40E_FC_FULL;
5346                 break;
5347         case I40E_AQ_LINK_PAUSE_RX:
5348                 rxfc = 1;
5349                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
5350                 break;
5351         case I40E_AQ_LINK_PAUSE_TX:
5352                 txfc = 1;
5353                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
5354                 break;
5355         default:
5356                 hw->fc.current_mode = I40E_FC_NONE;
5357                 break;
5358         }
5359
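        /*
         * On the error paths above rxfc/txfc remain 0, so flow control is
         * disabled. FCCFG.TFCE governs pause-frame transmission and
         * MFLCN.RFCE governs honoring received pause frames.
         */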
5360 write_reg:
5361         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
5362                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
5363         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
5364         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
5365         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
5366         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
5367
5368         return ret;
5369 }
5370
5371 /* PF setup */
5372 static int
5373 i40e_pf_setup(struct i40e_pf *pf)
5374 {
5375         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5376         struct i40e_filter_control_settings settings;
5377         struct i40e_vsi *vsi;
5378         int ret;
5379
5380         /* Clear all stats counters */
5381         pf->offset_loaded = FALSE;
5382         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
5383         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
5384         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
5385         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
5386
5387         ret = i40e_pf_get_switch_config(pf);
5388         if (ret != I40E_SUCCESS) {
5389                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
5390                 return ret;
5391         }
5392         if (pf->flags & I40E_FLAG_FDIR) {
5393                 /* Allocate the queue first, so FDIR can use queue pair 0 */
5394                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
5395                 if (ret != I40E_FDIR_QUEUE_ID) {
5396                         PMD_DRV_LOG(ERR,
5397                                 "queue allocation failed for FDIR: ret=%d",
5398                                 ret);
5399                         pf->flags &= ~I40E_FLAG_FDIR;
5400                 }
5401         }
5402         /* Main VSI setup */
5403         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
5404         if (!vsi) {
5405                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
5406                 return I40E_ERR_NOT_READY;
5407         }
5408         pf->main_vsi = vsi;
5409
5410         /* Configure filter control */
5411         memset(&settings, 0, sizeof(settings));
5412         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
5413                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
5414         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
5415                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
5416         else {
5417                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
5418                         hw->func_caps.rss_table_size);
5419                 return I40E_ERR_PARAM;
5420         }
5421         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
5422                 hw->func_caps.rss_table_size);
5423         pf->hash_lut_size = hw->func_caps.rss_table_size;
5424
5425         /* Enable ethtype and macvlan filters */
5426         settings.enable_ethtype = TRUE;
5427         settings.enable_macvlan = TRUE;
5428         ret = i40e_set_filter_control(hw, &settings);
5429         if (ret)
5430                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
5431                                                                 ret);
5432
5433         /* Update flow control according to the auto negotiation */
5434         i40e_update_flow_control(hw);
5435
5436         return I40E_SUCCESS;
5437 }
5438
5439 int
5440 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5441 {
5442         uint32_t reg;
5443         uint16_t j;
5444
5445         /**
5446          * Set or clear the TX Queue Disable flags,
5447          * as required by hardware.
5448          */
5449         i40e_pre_tx_queue_cfg(hw, q_idx, on);
5450         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
5451
5452         /* Wait until the request is finished */
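        /* The pre-config request has settled once QENA_REQ equals QENA_STAT. */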
5453         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5454                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5455                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5456                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5457                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
5458                                                         & 0x1))) {
5459                         break;
5460                 }
5461         }
5462         if (on) {
5463                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
5464                         return I40E_SUCCESS; /* already on, skip next steps */
5465
5466                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
5467                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
5468         } else {
5469                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5470                         return I40E_SUCCESS; /* already off, skip next steps */
5471                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
5472         }
5473         /* Write the register */
5474         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
5475         /* Check the result */
5476         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5477                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5478                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5479                 if (on) {
5480                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5481                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
5482                                 break;
5483                 } else {
5484                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5485                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5486                                 break;
5487                 }
5488         }
5489         /* Check if it is timeout */
5490         if (j >= I40E_CHK_Q_ENA_COUNT) {
5491                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5492                             (on ? "enable" : "disable"), q_idx);
5493                 return I40E_ERR_TIMEOUT;
5494         }
5495
5496         return I40E_SUCCESS;
5497 }
5498
5499 /* Switch on or off the tx queues */
5500 static int
5501 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5502 {
5503         struct rte_eth_dev_data *dev_data = pf->dev_data;
5504         struct i40e_tx_queue *txq;
5505         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5506         uint16_t i;
5507         int ret;
5508
5509         for (i = 0; i < dev_data->nb_tx_queues; i++) {
5510                 txq = dev_data->tx_queues[i];
5511                 /* Don't operate the queue if it is not configured, or
5512                  * if its start is deferred to per-queue control */
5513                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5514                         continue;
5515                 if (on)
5516                         ret = i40e_dev_tx_queue_start(dev, i);
5517                 else
5518                         ret = i40e_dev_tx_queue_stop(dev, i);
5519                 if (ret != I40E_SUCCESS)
5520                         return ret;
5521         }
5522
5523         return I40E_SUCCESS;
5524 }
5525
5526 int
5527 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5528 {
5529         uint32_t reg;
5530         uint16_t j;
5531
5532         /* Wait until the request is finished */
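        /* The queue state is stable once QENA_REQ equals QENA_STAT. */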
5533         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5534                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5535                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5536                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5537                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
5538                         break;
5539         }
5540
5541         if (on) {
5542                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5543                         return I40E_SUCCESS; /* Already on, skip next steps */
5544                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5545         } else {
5546                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5547                         return I40E_SUCCESS; /* Already off, skip next steps */
5548                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5549         }
5550
5551         /* Write the register */
5552         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5553         /* Check the result */
5554         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5555                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5556                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5557                 if (on) {
5558                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5559                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
5560                                 break;
5561                 } else {
5562                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5563                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5564                                 break;
5565                 }
5566         }
5567
5568         /* Check if it is timeout */
5569         if (j >= I40E_CHK_Q_ENA_COUNT) {
5570                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
5571                             (on ? "enable" : "disable"), q_idx);
5572                 return I40E_ERR_TIMEOUT;
5573         }
5574
5575         return I40E_SUCCESS;
5576 }
5577 /* Switch on or off the rx queues */
5578 static int
5579 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
5580 {
5581         struct rte_eth_dev_data *dev_data = pf->dev_data;
5582         struct i40e_rx_queue *rxq;
5583         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5584         uint16_t i;
5585         int ret;
5586
5587         for (i = 0; i < dev_data->nb_rx_queues; i++) {
5588                 rxq = dev_data->rx_queues[i];
5589                 /* Don't operate the queue if it is not configured, or
5590                  * if its start is deferred to per-queue control */
5591                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
5592                         continue;
5593                 if (on)
5594                         ret = i40e_dev_rx_queue_start(dev, i);
5595                 else
5596                         ret = i40e_dev_rx_queue_stop(dev, i);
5597                 if (ret != I40E_SUCCESS)
5598                         return ret;
5599         }
5600
5601         return I40E_SUCCESS;
5602 }
5603
5604 /* Switch on or off all the rx/tx queues */
5605 int
5606 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
5607 {
5608         int ret;
5609
5610         if (on) {
5611                 /* enable rx queues before enabling tx queues */
5612                 ret = i40e_dev_switch_rx_queues(pf, on);
5613                 if (ret) {
5614                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
5615                         return ret;
5616                 }
5617                 ret = i40e_dev_switch_tx_queues(pf, on);
5618         } else {
5619                 /* Stop tx queues before stopping rx queues */
5620                 ret = i40e_dev_switch_tx_queues(pf, on);
5621                 if (ret) {
5622                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
5623                         return ret;
5624                 }
5625                 ret = i40e_dev_switch_rx_queues(pf, on);
5626         }
5627
5628         return ret;
5629 }
5630
5631 /* Initialize VSI for TX */
5632 static int
5633 i40e_dev_tx_init(struct i40e_pf *pf)
5634 {
5635         struct rte_eth_dev_data *data = pf->dev_data;
5636         uint16_t i;
5637         uint32_t ret = I40E_SUCCESS;
5638         struct i40e_tx_queue *txq;
5639
5640         for (i = 0; i < data->nb_tx_queues; i++) {
5641                 txq = data->tx_queues[i];
5642                 if (!txq || !txq->q_set)
5643                         continue;
5644                 ret = i40e_tx_queue_init(txq);
5645                 if (ret != I40E_SUCCESS)
5646                         break;
5647         }
5648         if (ret == I40E_SUCCESS)
5649                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
5650                                      ->eth_dev);
5651
5652         return ret;
5653 }
5654
5655 /* Initialize VSI for RX */
5656 static int
5657 i40e_dev_rx_init(struct i40e_pf *pf)
5658 {
5659         struct rte_eth_dev_data *data = pf->dev_data;
5660         int ret = I40E_SUCCESS;
5661         uint16_t i;
5662         struct i40e_rx_queue *rxq;
5663
5664         i40e_pf_config_mq_rx(pf);
5665         for (i = 0; i < data->nb_rx_queues; i++) {
5666                 rxq = data->rx_queues[i];
5667                 if (!rxq || !rxq->q_set)
5668                         continue;
5669
5670                 ret = i40e_rx_queue_init(rxq);
5671                 if (ret != I40E_SUCCESS) {
5672                         PMD_DRV_LOG(ERR,
5673                                 "Failed to do RX queue initialization");
5674                         break;
5675                 }
5676         }
5677         if (ret == I40E_SUCCESS)
5678                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
5679                                      ->eth_dev);
5680
5681         return ret;
5682 }
5683
5684 static int
5685 i40e_dev_rxtx_init(struct i40e_pf *pf)
5686 {
5687         int err;
5688
5689         err = i40e_dev_tx_init(pf);
5690         if (err) {
5691                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
5692                 return err;
5693         }
5694         err = i40e_dev_rx_init(pf);
5695         if (err) {
5696                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
5697                 return err;
5698         }
5699
5700         return err;
5701 }
5702
5703 static int
5704 i40e_vmdq_setup(struct rte_eth_dev *dev)
5705 {
5706         struct rte_eth_conf *conf = &dev->data->dev_conf;
5707         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5708         int i, err, conf_vsis, j, loop;
5709         struct i40e_vsi *vsi;
5710         struct i40e_vmdq_info *vmdq_info;
5711         struct rte_eth_vmdq_rx_conf *vmdq_conf;
5712         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5713
5714         /*
5715          * Disable interrupts to avoid messages from VFs. Furthermore, it
5716          * avoids race conditions during VSI creation/destruction.
5717          */
5718         i40e_pf_disable_irq0(hw);
5719
5720         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
5721                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
5722                 return -ENOTSUP;
5723         }
5724
5725         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
5726         if (conf_vsis > pf->max_nb_vmdq_vsi) {
5727                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
5728                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
5729                         pf->max_nb_vmdq_vsi);
5730                 return -ENOTSUP;
5731         }
5732
5733         if (pf->vmdq != NULL) {
5734                 PMD_INIT_LOG(INFO, "VMDQ already configured");
5735                 return 0;
5736         }
5737
5738         pf->vmdq = rte_zmalloc("vmdq_info_struct",
5739                                 sizeof(*vmdq_info) * conf_vsis, 0);
5740
5741         if (pf->vmdq == NULL) {
5742                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
5743                 return -ENOMEM;
5744         }
5745
5746         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
5747
5748         /* Create VMDQ VSI */
5749         for (i = 0; i < conf_vsis; i++) {
5750                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
5751                                 vmdq_conf->enable_loop_back);
5752                 if (vsi == NULL) {
5753                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
5754                         err = -1;
5755                         goto err_vsi_setup;
5756                 }
5757                 vmdq_info = &pf->vmdq[i];
5758                 vmdq_info->pf = pf;
5759                 vmdq_info->vsi = vsi;
5760         }
5761         pf->nb_cfg_vmdq_vsi = conf_vsis;
5762
5763         /* Configure the vlan id to pool mapping */
5764         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
5765         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
5766                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
5767                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
5768                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
5769                                         vmdq_conf->pool_map[i].vlan_id, j);
5770
5771                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
5772                                                 vmdq_conf->pool_map[i].vlan_id);
5773                                 if (err) {
5774                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
5775                                         err = -1;
5776                                         goto err_vsi_setup;
5777                                 }
5778                         }
5779                 }
5780         }
5781
5782         i40e_pf_enable_irq0(hw);
5783
5784         return 0;
5785
5786 err_vsi_setup:
5787         for (i = 0; i < conf_vsis; i++)
5788                 if (pf->vmdq[i].vsi == NULL)
5789                         break;
5790                 else
5791                         i40e_vsi_release(pf->vmdq[i].vsi);
5792
5793         rte_free(pf->vmdq);
5794         pf->vmdq = NULL;
5795         i40e_pf_enable_irq0(hw);
5796         return err;
5797 }
5798
5799 static void
5800 i40e_stat_update_32(struct i40e_hw *hw,
5801                    uint32_t reg,
5802                    bool offset_loaded,
5803                    uint64_t *offset,
5804                    uint64_t *stat)
5805 {
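        /*
         * Hardware statistics registers are free-running counters. The first
         * read is latched as the offset; subsequent reads report the delta,
         * adding 2^32 when the 32-bit counter has wrapped.
         */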
5806         uint64_t new_data;
5807
5808         new_data = (uint64_t)I40E_READ_REG(hw, reg);
5809         if (!offset_loaded)
5810                 *offset = new_data;
5811
5812         if (new_data >= *offset)
5813                 *stat = (uint64_t)(new_data - *offset);
5814         else
5815                 *stat = (uint64_t)((new_data +
5816                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
5817 }
5818
5819 static void
5820 i40e_stat_update_48(struct i40e_hw *hw,
5821                    uint32_t hireg,
5822                    uint32_t loreg,
5823                    bool offset_loaded,
5824                    uint64_t *offset,
5825                    uint64_t *stat)
5826 {
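        /*
         * 48-bit counters are split across two registers: the low 32 bits in
         * loreg and the high 16 bits in hireg. Wrap at 2^48 is handled the
         * same way as in the 32-bit case.
         */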
5827         uint64_t new_data;
5828
5829         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
5830         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
5831                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
5832
5833         if (!offset_loaded)
5834                 *offset = new_data;
5835
5836         if (new_data >= *offset)
5837                 *stat = new_data - *offset;
5838         else
5839                 *stat = (uint64_t)((new_data +
5840                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
5841
5842         *stat &= I40E_48_BIT_MASK;
5843 }
5844
5845 /* Disable IRQ0 */
5846 void
5847 i40e_pf_disable_irq0(struct i40e_hw *hw)
5848 {
5849         /* Disable all interrupt types */
5850         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
5851         I40E_WRITE_FLUSH(hw);
5852 }
5853
5854 /* Enable IRQ0 */
5855 void
5856 i40e_pf_enable_irq0(struct i40e_hw *hw)
5857 {
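        /*
         * Re-arm the interrupt: INTENA enables it, CLEARPBA clears the
         * pending-bit-array entry, and an ITR index of 3 (the full mask)
         * means "no ITR update", leaving the throttle interval unchanged.
         */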
5858         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
5859                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
5860                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
5861                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
5862         I40E_WRITE_FLUSH(hw);
5863 }
5864
5865 static void
5866 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
5867 {
5868         /* read pending request and disable first */
5869         i40e_pf_disable_irq0(hw);
5870         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
5871         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
5872                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
5873
5874         if (no_queue)
5875                 /* Link no queues to irq0 */
5876                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
5877                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
5878 }
5879
5880 static void
5881 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
5882 {
5883         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5884         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5885         int i;
5886         uint16_t abs_vf_id;
5887         uint32_t index, offset, val;
5888
5889         if (!pf->vfs)
5890                 return;
5891         /**
5892          * Try to find which VF triggered a reset; use the absolute VF id,
5893          * since the register is a global one.
5894          */
5895         for (i = 0; i < pf->vf_num; i++) {
5896                 abs_vf_id = hw->func_caps.vf_base_id + i;
5897                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
5898                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
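                /*
                 * Each VFLRSTAT register holds 32 VF bits: 'index' selects
                 * the register and 'offset' the bit within it.
                 */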
5899                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
5900                 /* VFR event occurred */
5901                 if (val & (0x1 << offset)) {
5902                         int ret;
5903
5904                         /* Clear the event first */
5905                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
5906                                                         (0x1 << offset));
5907                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
5908                         /**
5909                          * Only notify that a VF reset event occurred;
5910                          * don't trigger another SW reset.
5911                          */
5912                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
5913                         if (ret != I40E_SUCCESS)
5914                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
5915                 }
5916         }
5917 }
5918
5919 static void
5920 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
5921 {
5922         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5923         int i;
5924
5925         for (i = 0; i < pf->vf_num; i++)
5926                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
5927 }
5928
5929 static void
5930 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
5931 {
5932         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5933         struct i40e_arq_event_info info;
5934         uint16_t pending, opcode;
5935         int ret;
5936
5937         info.buf_len = I40E_AQ_BUF_SZ;
5938         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
5939         if (!info.msg_buf) {
5940                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
5941                 return;
5942         }
5943
5944         pending = 1;
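        /*
         * i40e_clean_arq_element() updates 'pending' with the number of
         * events still queued, so loop until the admin receive queue drains.
         */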
5945         while (pending) {
5946                 ret = i40e_clean_arq_element(hw, &info, &pending);
5947
5948                 if (ret != I40E_SUCCESS) {
5949                         PMD_DRV_LOG(INFO,
5950                                 "Failed to read msg from AdminQ, aq_err: %u",
5951                                 hw->aq.asq_last_status);
5952                         break;
5953                 }
5954                 opcode = rte_le_to_cpu_16(info.desc.opcode);
5955
5956                 switch (opcode) {
5957                 case i40e_aqc_opc_send_msg_to_pf:
5958                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
5959                         i40e_pf_host_handle_vf_msg(dev,
5960                                         rte_le_to_cpu_16(info.desc.retval),
5961                                         rte_le_to_cpu_32(info.desc.cookie_high),
5962                                         rte_le_to_cpu_32(info.desc.cookie_low),
5963                                         info.msg_buf,
5964                                         info.msg_len);
5965                         break;
5966                 case i40e_aqc_opc_get_link_status:
5967                         ret = i40e_dev_link_update(dev, 0);
5968                         if (!ret)
5969                                 _rte_eth_dev_callback_process(dev,
5970                                         RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
5971                         break;
5972                 default:
5973                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
5974                                     opcode);
5975                         break;
5976                 }
5977         }
5978         rte_free(info.msg_buf);
5979 }
5980
5981 /**
5982  * Interrupt handler triggered by the NIC for handling
5983  * a specific interrupt.
5984  *
5985  * @param handle
5986  *  Pointer to interrupt handle.
5987  * @param param
5988  *  The address of the parameter (struct rte_eth_dev *) registered before.
5989  *
5990  * @return
5991  *  void
5992  */
5993 static void
5994 i40e_dev_interrupt_handler(void *param)
5995 {
5996         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
5997         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5998         uint32_t icr0;
5999
6000         /* Disable interrupt */
6001         i40e_pf_disable_irq0(hw);
6002
6003         /* read out interrupt causes */
6004         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6005
6006         /* No interrupt event indicated */
6007         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6008                 PMD_DRV_LOG(INFO, "No interrupt event");
6009                 goto done;
6010         }
6011         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6012                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6013         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6014                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6015         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6016                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6017         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6018                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6019         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6020                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6021         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6022                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6023         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6024                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6025
6026         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6027                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6028                 i40e_dev_handle_vfr_event(dev);
6029         }
6030         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6031                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6032                 i40e_dev_handle_aq_msg(dev);
6033         }
6034
6035 done:
6036         /* Enable interrupt */
6037         i40e_pf_enable_irq0(hw);
6038         rte_intr_enable(dev->intr_handle);
6039 }
6040
6041 int
6042 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6043                          struct i40e_macvlan_filter *filter,
6044                          int total)
6045 {
6046         int ele_num, ele_buff_size;
6047         int num, actual_num, i;
6048         uint16_t flags;
6049         int ret = I40E_SUCCESS;
6050         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6051         struct i40e_aqc_add_macvlan_element_data *req_list;
6052
6053         if (filter == NULL || total == 0)
6054                 return I40E_ERR_PARAM;
6055         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6056         ele_buff_size = hw->aq.asq_buf_size;
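        /*
         * The AQ buffer holds at most ele_num elements, so the add request
         * is issued in chunks of up to ele_num entries.
         */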
6057
6058         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6059         if (req_list == NULL) {
6060                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6061                 return I40E_ERR_NO_MEMORY;
6062         }
6063
6064         num = 0;
6065         do {
6066                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6067                 memset(req_list, 0, ele_buff_size);
6068
6069                 for (i = 0; i < actual_num; i++) {
6070                         rte_memcpy(req_list[i].mac_addr,
6071                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6072                         req_list[i].vlan_tag =
6073                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6074
6075                         switch (filter[num + i].filter_type) {
6076                         case RTE_MAC_PERFECT_MATCH:
6077                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6078                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6079                                 break;
6080                         case RTE_MACVLAN_PERFECT_MATCH:
6081                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6082                                 break;
6083                         case RTE_MAC_HASH_MATCH:
6084                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6085                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6086                                 break;
6087                         case RTE_MACVLAN_HASH_MATCH:
6088                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6089                                 break;
6090                         default:
6091                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
6092                                 ret = I40E_ERR_PARAM;
6093                                 goto DONE;
6094                         }
6095
6096                         req_list[i].queue_number = 0;
6097
6098                         req_list[i].flags = rte_cpu_to_le_16(flags);
6099                 }
6100
6101                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6102                                                 actual_num, NULL);
6103                 if (ret != I40E_SUCCESS) {
6104                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6105                         goto DONE;
6106                 }
6107                 num += actual_num;
6108         } while (num < total);
6109
6110 DONE:
6111         rte_free(req_list);
6112         return ret;
6113 }
6114
6115 int
6116 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6117                             struct i40e_macvlan_filter *filter,
6118                             int total)
6119 {
6120         int ele_num, ele_buff_size;
6121         int num, actual_num, i;
6122         uint16_t flags;
6123         int ret = I40E_SUCCESS;
6124         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6125         struct i40e_aqc_remove_macvlan_element_data *req_list;
6126
6127         if (filter == NULL || total == 0)
6128                 return I40E_ERR_PARAM;
6129
6130         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6131         ele_buff_size = hw->aq.asq_buf_size;
6132
6133         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6134         if (req_list == NULL) {
6135                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6136                 return I40E_ERR_NO_MEMORY;
6137         }
6138
6139         num = 0;
6140         do {
6141                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6142                 memset(req_list, 0, ele_buff_size);
6143
6144                 for (i = 0; i < actual_num; i++) {
6145                         rte_memcpy(req_list[i].mac_addr,
6146                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6147                         req_list[i].vlan_tag =
6148                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6149
6150                         switch (filter[num + i].filter_type) {
6151                         case RTE_MAC_PERFECT_MATCH:
6152                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6153                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6154                                 break;
6155                         case RTE_MACVLAN_PERFECT_MATCH:
6156                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6157                                 break;
6158                         case RTE_MAC_HASH_MATCH:
6159                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6160                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6161                                 break;
6162                         case RTE_MACVLAN_HASH_MATCH:
6163                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6164                                 break;
6165                         default:
6166                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6167                                 ret = I40E_ERR_PARAM;
6168                                 goto DONE;
6169                         }
6170                         req_list[i].flags = rte_cpu_to_le_16(flags);
6171                 }
6172
6173                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6174                                                 actual_num, NULL);
6175                 if (ret != I40E_SUCCESS) {
6176                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6177                         goto DONE;
6178                 }
6179                 num += actual_num;
6180         } while (num < total);
6181
6182 DONE:
6183         rte_free(req_list);
6184         return ret;
6185 }
6186
6187 /* Find a specific MAC filter */
6188 static struct i40e_mac_filter *
6189 i40e_find_mac_filter(struct i40e_vsi *vsi,
6190                          struct ether_addr *macaddr)
6191 {
6192         struct i40e_mac_filter *f;
6193
6194         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6195                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6196                         return f;
6197         }
6198
6199         return NULL;
6200 }
6201
6202 static bool
6203 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6204                          uint16_t vlan_id)
6205 {
6206         uint32_t vid_idx, vid_bit;
6207
6208         if (vlan_id > ETH_VLAN_ID_MAX)
6209                 return 0;
6210
6211         vid_idx = I40E_VFTA_IDX(vlan_id);
6212         vid_bit = I40E_VFTA_BIT(vlan_id);
6213
6214         if (vsi->vfta[vid_idx] & vid_bit)
6215                 return 1;
6216         else
6217                 return 0;
6218 }
6219
6220 static void
6221 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6222                        uint16_t vlan_id, bool on)
6223 {
6224         uint32_t vid_idx, vid_bit;
6225
6226         vid_idx = I40E_VFTA_IDX(vlan_id);
6227         vid_bit = I40E_VFTA_BIT(vlan_id);
6228
6229         if (on)
6230                 vsi->vfta[vid_idx] |= vid_bit;
6231         else
6232                 vsi->vfta[vid_idx] &= ~vid_bit;
6233 }
6234
6235 void
6236 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6237                      uint16_t vlan_id, bool on)
6238 {
6239         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6240         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6241         int ret;
6242
6243         if (vlan_id > ETH_VLAN_ID_MAX)
6244                 return;
6245
6246         i40e_store_vlan_filter(vsi, vlan_id, on);
6247
6248         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6249                 return;
6250
6251         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6252
6253         if (on) {
6254                 ret = i40e_aq_add_vlan(hw, vsi->seid,
6255                                        &vlan_data, 1, NULL);
6256                 if (ret != I40E_SUCCESS)
6257                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6258         } else {
6259                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
6260                                           &vlan_data, 1, NULL);
6261                 if (ret != I40E_SUCCESS)
6262                         PMD_DRV_LOG(ERR,
6263                                     "Failed to remove vlan filter");
6264         }
6265 }
6266
6267 /**
6268  * Find all vlan options for a specific mac addr,
6269  * filling mv_f with the vlans actually found.
6270  */
6271 int
6272 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6273                            struct i40e_macvlan_filter *mv_f,
6274                            int num, struct ether_addr *addr)
6275 {
6276         int i;
6277         uint32_t j, k;
6278
6279         /**
6280          * Walk the vfta directly instead of calling i40e_find_vlan_filter,
6281          * to decrease the loop time, although the code looks complex.
6282          */
6283         if (num < vsi->vlan_num)
6284                 return I40E_ERR_PARAM;
6285
6286         i = 0;
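        /*
         * The vfta is a bitmap of all 4096 vlan ids packed into 32-bit
         * words: j indexes the word and k the bit within it.
         */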
6287         for (j = 0; j < I40E_VFTA_SIZE; j++) {
6288                 if (vsi->vfta[j]) {
6289                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6290                                 if (vsi->vfta[j] & (1 << k)) {
6291                                         if (i > num - 1) {
6292                                                 PMD_DRV_LOG(ERR,
6293                                                         "vlan number doesn't match");
6294                                                 return I40E_ERR_PARAM;
6295                                         }
6296                                         rte_memcpy(&mv_f[i].macaddr,
6297                                                         addr, ETH_ADDR_LEN);
6298                                         mv_f[i].vlan_id =
6299                                                 j * I40E_UINT32_BIT_SIZE + k;
6300                                         i++;
6301                                 }
6302                         }
6303                 }
6304         }
6305         return I40E_SUCCESS;
6306 }
6307
6308 static inline int
6309 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
6310                            struct i40e_macvlan_filter *mv_f,
6311                            int num,
6312                            uint16_t vlan)
6313 {
6314         int i = 0;
6315         struct i40e_mac_filter *f;
6316
6317         if (num < vsi->mac_num)
6318                 return I40E_ERR_PARAM;
6319
6320         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6321                 if (i > num - 1) {
6322                         PMD_DRV_LOG(ERR, "buffer number doesn't match");
6323                         return I40E_ERR_PARAM;
6324                 }
6325                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6326                                 ETH_ADDR_LEN);
6327                 mv_f[i].vlan_id = vlan;
6328                 mv_f[i].filter_type = f->mac_info.filter_type;
6329                 i++;
6330         }
6331
6332         return I40E_SUCCESS;
6333 }
6334
6335 static int
6336 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
6337 {
6338         int i, j, num;
6339         struct i40e_mac_filter *f;
6340         struct i40e_macvlan_filter *mv_f;
6341         int ret = I40E_SUCCESS;
6342
6343         if (vsi == NULL || vsi->mac_num == 0)
6344                 return I40E_ERR_PARAM;
6345
6346         /* Case that no vlan is set */
6347         if (vsi->vlan_num == 0)
6348                 num = vsi->mac_num;
6349         else
6350                 num = vsi->mac_num * vsi->vlan_num;
6351
6352         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
6353         if (mv_f == NULL) {
6354                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6355                 return I40E_ERR_NO_MEMORY;
6356         }
6357
6358         i = 0;
6359         if (vsi->vlan_num == 0) {
6360                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6361                         rte_memcpy(&mv_f[i].macaddr,
6362                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
6363                         mv_f[i].filter_type = f->mac_info.filter_type;
6364                         mv_f[i].vlan_id = 0;
6365                         i++;
6366                 }
6367         } else {
6368                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6369                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
6370                                         vsi->vlan_num, &f->mac_info.mac_addr);
6371                         if (ret != I40E_SUCCESS)
6372                                 goto DONE;
6373                         for (j = i; j < i + vsi->vlan_num; j++)
6374                                 mv_f[j].filter_type = f->mac_info.filter_type;
6375                         i += vsi->vlan_num;
6376                 }
6377         }
6378
6379         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
6380 DONE:
6381         rte_free(mv_f);
6382
6383         return ret;
6384 }
6385
6386 int
6387 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6388 {
6389         struct i40e_macvlan_filter *mv_f;
6390         int mac_num;
6391         int ret = I40E_SUCCESS;
6392
6393         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
6394                 return I40E_ERR_PARAM;
6395
6396         /* If it's already set, just return */
6397         if (i40e_find_vlan_filter(vsi, vlan))
6398                 return I40E_SUCCESS;
6399
6400         mac_num = vsi->mac_num;
6401
6402         if (mac_num == 0) {
6403                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6404                 return I40E_ERR_PARAM;
6405         }
6406
6407         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6408
6409         if (mv_f == NULL) {
6410                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6411                 return I40E_ERR_NO_MEMORY;
6412         }
6413
6414         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6415
6416         if (ret != I40E_SUCCESS)
6417                 goto DONE;
6418
6419         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6420
6421         if (ret != I40E_SUCCESS)
6422                 goto DONE;
6423
6424         i40e_set_vlan_filter(vsi, vlan, 1);
6425
6426         vsi->vlan_num++;
6427         ret = I40E_SUCCESS;
6428 DONE:
6429         rte_free(mv_f);
6430         return ret;
6431 }
6432
6433 int
6434 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6435 {
6436         struct i40e_macvlan_filter *mv_f;
6437         int mac_num;
6438         int ret = I40E_SUCCESS;
6439
6440         /**
6441          * Vlan 0 is the generic filter for untagged packets
6442          * and can't be removed.
6443          */
6444         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
6445                 return I40E_ERR_PARAM;
6446
6447         /* If can't find it, just return */
6448         if (!i40e_find_vlan_filter(vsi, vlan))
6449                 return I40E_ERR_PARAM;
6450
6451         mac_num = vsi->mac_num;
6452
6453         if (mac_num == 0) {
6454                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6455                 return I40E_ERR_PARAM;
6456         }
6457
6458         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6459
6460         if (mv_f == NULL) {
6461                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6462                 return I40E_ERR_NO_MEMORY;
6463         }
6464
6465         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6466
6467         if (ret != I40E_SUCCESS)
6468                 goto DONE;
6469
6470         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6471
6472         if (ret != I40E_SUCCESS)
6473                 goto DONE;
6474
6475         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
6476         if (vsi->vlan_num == 1) {
6477                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6478                 if (ret != I40E_SUCCESS)
6479                         goto DONE;
6480
6481                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6482                 if (ret != I40E_SUCCESS)
6483                         goto DONE;
6484         }
6485
6486         i40e_set_vlan_filter(vsi, vlan, 0);
6487
6488         vsi->vlan_num--;
6489         ret = I40E_SUCCESS;
6490 DONE:
6491         rte_free(mv_f);
6492         return ret;
6493 }
6494
6495 int
6496 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6497 {
6498         struct i40e_mac_filter *f;
6499         struct i40e_macvlan_filter *mv_f;
6500         int i, vlan_num = 0;
6501         int ret = I40E_SUCCESS;
6502
6503         /* If the filter already exists, just return */
6504         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6505         if (f != NULL)
6506                 return I40E_SUCCESS;
6507         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6508                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6509
6510                 /**
6511                  * If vlan_num is 0, this is the first time a mac is added;
6512                  * set the mask for vlan_id 0.
6513                  */
6514                 if (vsi->vlan_num == 0) {
6515                         i40e_set_vlan_filter(vsi, 0, 1);
6516                         vsi->vlan_num = 1;
6517                 }
6518                 vlan_num = vsi->vlan_num;
6519         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6520                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6521                 vlan_num = 1;
6522
6523         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6524         if (mv_f == NULL) {
6525                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6526                 return I40E_ERR_NO_MEMORY;
6527         }
6528
6529         for (i = 0; i < vlan_num; i++) {
6530                 mv_f[i].filter_type = mac_filter->filter_type;
6531                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6532                                 ETH_ADDR_LEN);
6533         }
6534
6535         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6536                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6537                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6538                                         &mac_filter->mac_addr);
6539                 if (ret != I40E_SUCCESS)
6540                         goto DONE;
6541         }
6542
6543         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6544         if (ret != I40E_SUCCESS)
6545                 goto DONE;
6546
6547         /* Add the mac addr to the mac list */
6548         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6549         if (f == NULL) {
6550                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6551                 ret = I40E_ERR_NO_MEMORY;
6552                 goto DONE;
6553         }
6554         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6555                         ETH_ADDR_LEN);
6556         f->mac_info.filter_type = mac_filter->filter_type;
6557         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
6558         vsi->mac_num++;
6559
6560         ret = I40E_SUCCESS;
6561 DONE:
6562         rte_free(mv_f);
6563
6564         return ret;
6565 }
6566
6567 int
6568 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
6569 {
6570         struct i40e_mac_filter *f;
6571         struct i40e_macvlan_filter *mv_f;
6572         int i, vlan_num;
6573         enum rte_mac_filter_type filter_type;
6574         int ret = I40E_SUCCESS;
6575
6576         /* If we can't find it, return an error */
6577         f = i40e_find_mac_filter(vsi, addr);
6578         if (f == NULL)
6579                 return I40E_ERR_PARAM;
6580
6581         vlan_num = vsi->vlan_num;
6582         filter_type = f->mac_info.filter_type;
6583         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6584                 filter_type == RTE_MACVLAN_HASH_MATCH) {
6585                 if (vlan_num == 0) {
6586                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
6587                         return I40E_ERR_PARAM;
6588                 }
6589         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
6590                         filter_type == RTE_MAC_HASH_MATCH)
6591                 vlan_num = 1;
6592
6593         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6594         if (mv_f == NULL) {
6595                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6596                 return I40E_ERR_NO_MEMORY;
6597         }
6598
6599         for (i = 0; i < vlan_num; i++) {
6600                 mv_f[i].filter_type = filter_type;
6601                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6602                                 ETH_ADDR_LEN);
6603         }
6604         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6605                         filter_type == RTE_MACVLAN_HASH_MATCH) {
6606                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
6607                 if (ret != I40E_SUCCESS)
6608                         goto DONE;
6609         }
6610
6611         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
6612         if (ret != I40E_SUCCESS)
6613                 goto DONE;
6614
6615         /* Remove the MAC address from the MAC list */
6616         TAILQ_REMOVE(&vsi->mac_list, f, next);
6617         rte_free(f);
6618         vsi->mac_num--;
6619
6620         ret = I40E_SUCCESS;
6621 DONE:
6622         rte_free(mv_f);
6623         return ret;
6624 }
6625
6626 /* Configure hash enable flags for RSS */
6627 uint64_t
6628 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
6629 {
6630         uint64_t hena = 0;
6631         int i;
6632
6633         if (!flags)
6634                 return hena;
6635
6636         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
6637                 if (flags & (1ULL << i))
6638                         hena |= adapter->pctypes_tbl[i];
6639         }
6640
6641         return hena;
6642 }
6643
6644 /* Parse the hash enable flags */
6645 uint64_t
6646 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
6647 {
6648         uint64_t rss_hf = 0;
6649         int i;
6650
6651         if (!flags)
6652                 return rss_hf;
6653
6654         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
6655                 if (flags & adapter->pctypes_tbl[i])
6656                         rss_hf |= (1ULL << i);
6657         }
6658         return rss_hf;
6659 }
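
/*
 * Illustrative sketch (not part of the driver): i40e_config_hena() and
 * i40e_parse_hena() are inverse mappings through adapter->pctypes_tbl[],
 * which stores, per RTE_ETH_FLOW_* index, the bitmask of hardware packet
 * classifier types (pctypes) implementing that flow type. For any supported
 * flow type whose pctypes are not shared with another flow type (the normal
 * case), the round trip is lossless:
 */
static inline bool
i40e_hena_roundtrip_ok(const struct i40e_adapter *adapter, int flow_type)
{
        uint64_t rss_hf = 1ULL << flow_type;               /* one flow type */
        uint64_t hena = i40e_config_hena(adapter, rss_hf); /* pctype bits */

        return i40e_parse_hena(adapter, hena) == rss_hf;
}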
6660
6661 /* Disable RSS */
6662 static void
6663 i40e_pf_disable_rss(struct i40e_pf *pf)
6664 {
6665         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6666
6667         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
6668         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
6669         I40E_WRITE_FLUSH(hw);
6670 }
6671
6672 static int
6673 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
6674 {
6675         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6676         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6677         int ret = 0;
6678
6679         if (!key || key_len == 0) {
6680                 PMD_DRV_LOG(DEBUG, "No key to be configured");
6681                 return 0;
6682         } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
6683                 sizeof(uint32_t)) {
6684                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
6685                 return -EINVAL;
6686         }
6687
6688         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6689                 struct i40e_aqc_get_set_rss_key_data *key_dw =
6690                         (struct i40e_aqc_get_set_rss_key_data *)key;
6691
6692                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
6693                 if (ret)
6694                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
6695         } else {
6696                 uint32_t *hash_key = (uint32_t *)key;
6697                 uint16_t i;
6698
6699                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6700                         i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]);
6701                 I40E_WRITE_FLUSH(hw);
6702         }
6703
6704         return ret;
6705 }
6706
6707 static int
6708 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
6709 {
6710         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6711         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6712         int ret;
6713
6714         if (!key || !key_len)
6715                 return -EINVAL;
6716
6717         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6718                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
6719                         (struct i40e_aqc_get_set_rss_key_data *)key);
6720                 if (ret) {
6721                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
6722                         return ret;
6723                 }
6724         } else {
6725                 uint32_t *key_dw = (uint32_t *)key;
6726                 uint16_t i;
6727
6728                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6729                         key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
6730         }
6731         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
6732
6733         return 0;
6734 }
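
/*
 * Sketch (illustrative, not part of the driver): the RSS key must cover the
 * 13 32-bit HKEY registers, i.e. (I40E_PFQF_HKEY_MAX_INDEX + 1) *
 * sizeof(uint32_t) = 52 bytes; i40e_set_rss_key() rejects any other length.
 * A read-modify-write of the key therefore looks like:
 */
static inline int
i40e_rss_key_rewrite_example(struct i40e_vsi *vsi)
{
        uint8_t key[(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)];
        uint8_t key_len;
        int ret;

        ret = i40e_get_rss_key(vsi, key, &key_len); /* key_len becomes 52 */
        if (ret)
                return ret;
        return i40e_set_rss_key(vsi, key, key_len);
}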
6735
6736 static int
6737 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
6738 {
6739         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6740         uint64_t hena;
6741         int ret;
6742
6743         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
6744                                rss_conf->rss_key_len);
6745         if (ret)
6746                 return ret;
6747
6748         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
6749         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
6750         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
6751         I40E_WRITE_FLUSH(hw);
6752
6753         return 0;
6754 }
6755
6756 static int
6757 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
6758                          struct rte_eth_rss_conf *rss_conf)
6759 {
6760         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6761         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6762         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
6763         uint64_t hena;
6764
6765         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6766         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6767
6768         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
6769                 if (rss_hf != 0) /* Enable RSS */
6770                         return -EINVAL;
6771                 return 0; /* Nothing to do */
6772         }
6773         /* RSS enabled */
6774         if (rss_hf == 0) /* Disable RSS */
6775                 return -EINVAL;
6776
6777         return i40e_hw_rss_hash_set(pf, rss_conf);
6778 }
6779
6780 static int
6781 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
6782                            struct rte_eth_rss_conf *rss_conf)
6783 {
6784         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6785         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6786         uint64_t hena;
6787
6788         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
6789                          &rss_conf->rss_key_len);
6790
6791         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6792         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6793         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
6794
6795         return 0;
6796 }
6797
6798 static int
6799 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
6800 {
6801         switch (filter_type) {
6802         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
6803                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
6804                 break;
6805         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
6806                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
6807                 break;
6808         case RTE_TUNNEL_FILTER_IMAC_TENID:
6809                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
6810                 break;
6811         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
6812                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
6813                 break;
6814         case ETH_TUNNEL_FILTER_IMAC:
6815                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
6816                 break;
6817         case ETH_TUNNEL_FILTER_OIP:
6818                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
6819                 break;
6820         case ETH_TUNNEL_FILTER_IIP:
6821                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
6822                 break;
6823         default:
6824                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
6825                 return -EINVAL;
6826         }
6827
6828         return 0;
6829 }
6830
6831 /* Convert tunnel filter structure */
6832 static int
6833 i40e_tunnel_filter_convert(
6834         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
6835         struct i40e_tunnel_filter *tunnel_filter)
6836 {
6837         ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
6838                         (struct ether_addr *)&tunnel_filter->input.outer_mac);
6839         ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
6840                         (struct ether_addr *)&tunnel_filter->input.inner_mac);
6841         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
6842         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
6843              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
6844             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
6845                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
6846         else
6847                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
6848         tunnel_filter->input.flags = cld_filter->element.flags;
6849         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
6850         tunnel_filter->queue = cld_filter->element.queue_number;
6851         rte_memcpy(tunnel_filter->input.general_fields,
6852                    cld_filter->general_fields,
6853                    sizeof(cld_filter->general_fields));
6854
6855         return 0;
6856 }
6857
6858 /* Check if the tunnel filter exists */
6859 struct i40e_tunnel_filter *
6860 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
6861                              const struct i40e_tunnel_filter_input *input)
6862 {
6863         int ret;
6864
6865         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
6866         if (ret < 0)
6867                 return NULL;
6868
6869         return tunnel_rule->hash_map[ret];
6870 }
6871
6872 /* Add a tunnel filter into the SW list */
6873 static int
6874 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
6875                              struct i40e_tunnel_filter *tunnel_filter)
6876 {
6877         struct i40e_tunnel_rule *rule = &pf->tunnel;
6878         int ret;
6879
6880         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
6881         if (ret < 0) {
6882                 PMD_DRV_LOG(ERR,
6883                             "Failed to insert tunnel filter into hash table %d!",
6884                             ret);
6885                 return ret;
6886         }
6887         rule->hash_map[ret] = tunnel_filter;
6888
6889         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
6890
6891         return 0;
6892 }
6893
6894 /* Delete a tunnel filter from the SW list */
6895 int
6896 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
6897                           struct i40e_tunnel_filter_input *input)
6898 {
6899         struct i40e_tunnel_rule *rule = &pf->tunnel;
6900         struct i40e_tunnel_filter *tunnel_filter;
6901         int ret;
6902
6903         ret = rte_hash_del_key(rule->hash_table, input);
6904         if (ret < 0) {
6905                 PMD_DRV_LOG(ERR,
6906                             "Failed to delete tunnel filter from hash table %d!",
6907                             ret);
6908                 return ret;
6909         }
6910         tunnel_filter = rule->hash_map[ret];
6911         rule->hash_map[ret] = NULL;
6912
6913         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
6914         rte_free(tunnel_filter);
6915
6916         return 0;
6917 }
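
/*
 * Illustrative sketch: the SW tunnel-filter store pairs an rte_hash (whose
 * add/del/lookup return value is a stable slot index) with the parallel
 * hash_map[] array resolving that index to the filter object, plus a TAILQ
 * for ordered traversal. A lookup is one hash probe plus one array access:
 */
static inline bool
i40e_sw_tunnel_filter_exists(struct i40e_pf *pf,
                             const struct i40e_tunnel_filter_input *input)
{
        return i40e_sw_tunnel_filter_lookup(&pf->tunnel, input) != NULL;
}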
6918
6919 int
6920 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
6921                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
6922                         uint8_t add)
6923 {
6924         uint16_t ip_type;
6925         uint32_t ipv4_addr, ipv4_addr_le;
6926         uint8_t i, tun_type = 0;
6927         /* internal variable to convert ipv6 byte order */
6928         uint32_t convert_ipv6[4];
6929         int val, ret = 0;
6930         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6931         struct i40e_vsi *vsi = pf->main_vsi;
6932         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
6933         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
6934         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
6935         struct i40e_tunnel_filter *tunnel, *node;
6936         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
6937
6938         cld_filter = rte_zmalloc("tunnel_filter",
6939                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
6940                          0);
6941
6942         if (cld_filter == NULL) {
6943                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
6944                 return -ENOMEM;
6945         }
6946         pfilter = cld_filter;
6947
6948         ether_addr_copy(&tunnel_filter->outer_mac,
6949                         (struct ether_addr *)&pfilter->element.outer_mac);
6950         ether_addr_copy(&tunnel_filter->inner_mac,
6951                         (struct ether_addr *)&pfilter->element.inner_mac);
6952
6953         pfilter->element.inner_vlan =
6954                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
6955         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
6956                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
6957                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
6958                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
6959                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
6960                                 &ipv4_addr_le,
6961                                 sizeof(pfilter->element.ipaddr.v4.data));
6962         } else {
6963                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
6964                 for (i = 0; i < 4; i++) {
6965                         convert_ipv6[i] =
6966                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
6967                 }
6968                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
6969                            &convert_ipv6,
6970                            sizeof(pfilter->element.ipaddr.v6.data));
6971         }
6972
6973         /* check tunnel type */
6974         switch (tunnel_filter->tunnel_type) {
6975         case RTE_TUNNEL_TYPE_VXLAN:
6976                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
6977                 break;
6978         case RTE_TUNNEL_TYPE_NVGRE:
6979                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
6980                 break;
6981         case RTE_TUNNEL_TYPE_IP_IN_GRE:
6982                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
6983                 break;
6984         default:
6985                 /* Other tunnel types are not supported. */
6986                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
6987                 rte_free(cld_filter);
6988                 return -EINVAL;
6989         }
6990
6991         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
6992                                        &pfilter->element.flags);
6993         if (val < 0) {
6994                 rte_free(cld_filter);
6995                 return -EINVAL;
6996         }
6997
6998         pfilter->element.flags |= rte_cpu_to_le_16(
6999                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7000                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7001         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7002         pfilter->element.queue_number =
7003                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7004
7005         /* Check if the filter exists in the SW list */
7006         memset(&check_filter, 0, sizeof(check_filter));
7007         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7008         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7009         if (add && node) {
7010                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7011                 rte_free(cld_filter);
7012                 return -EINVAL;
7013         }
7014
7015         if (!add && !node) {
7016                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7017                 rte_free(cld_filter);
7018                 return -EINVAL;
7019         }
7018
7019         if (add) {
7020                 ret = i40e_aq_add_cloud_filters(hw,
7021                                         vsi->seid, &cld_filter->element, 1);
7022                 if (ret < 0) {
7023                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7024                         rte_free(cld_filter);
7025                         return -ENOTSUP;
7026                 }
7027                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7028                 if (tunnel == NULL) {
7029                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7030                         rte_free(cld_filter);
7031                         return -ENOMEM;
7032                 }
7033                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7034                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7035         } else {
7036                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7037                                                    &cld_filter->element, 1);
7038                 if (ret < 0) {
7039                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7040                         rte_free(cld_filter);
7041                         return -ENOTSUP;
7042                 }
7043                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7044         }
7038
7039         rte_free(cld_filter);
7040         return ret;
7041 }
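
/*
 * Usage sketch (illustrative; the values are placeholders): add a VXLAN
 * cloud filter steering packets with inner MAC 'mac' and VNI 100 to RX
 * queue 0, then remove it again.
 */
static inline int
i40e_vxlan_filter_example(struct i40e_pf *pf, const struct ether_addr *mac)
{
        struct rte_eth_tunnel_filter_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));
        ether_addr_copy(mac, &conf.inner_mac);
        conf.filter_type = RTE_TUNNEL_FILTER_IMAC_TENID;
        conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
        conf.tenant_id = 100;                   /* VNI */
        conf.queue_id = 0;

        ret = i40e_dev_tunnel_filter_set(pf, &conf, 1); /* add */
        if (ret)
                return ret;
        return i40e_dev_tunnel_filter_set(pf, &conf, 0); /* delete */
}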
7042
7043 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7044 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7045 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7046 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7047 #define I40E_TR_GRE_KEY_MASK                    0x400
7048 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7049 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7050
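/*
 * The helpers below use the admin-queue "replace cloud filters" command to
 * repurpose filter types the firmware does not use by default (0x11-0x13),
 * so the parser can match MPLSoUDP/MPLSoGRE and GTP-C/GTP-U tunnels, which
 * the stock cloud filter types cannot express. Each helper programs an L1
 * (field vector) stage and/or the cloud filter stage accordingly.
 */
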
7051 static enum i40e_status_code
7052 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7053 {
7054         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7055         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7056         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7057         enum i40e_status_code status = I40E_SUCCESS;
7058
7059         memset(&filter_replace, 0,
7060                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7061         memset(&filter_replace_buf, 0,
7062                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7063
7064         /* create L1 filter */
7065         filter_replace.old_filter_type =
7066                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7067         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7068         filter_replace.tr_bit = 0;
7069
7070         /* Prepare the buffer, 3 entries */
7071         filter_replace_buf.data[0] =
7072                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7073         filter_replace_buf.data[0] |=
7074                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7075         filter_replace_buf.data[2] = 0xFF;
7076         filter_replace_buf.data[3] = 0xFF;
7077         filter_replace_buf.data[4] =
7078                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7079         filter_replace_buf.data[4] |=
7080                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7081         filter_replace_buf.data[7] = 0xF0;
7082         filter_replace_buf.data[8]
7083                 = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7084         filter_replace_buf.data[8] |=
7085                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7086         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7087                 I40E_TR_GENEVE_KEY_MASK |
7088                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7089         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7090                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7091                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7092
7093         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7094                                                &filter_replace_buf);
7095         return status;
7096 }
7097
7098 static enum i40e_status_code
7099 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7100 {
7101         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7102         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7103         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7104         enum i40e_status_code status = I40E_SUCCESS;
7105
7106         /* For MPLSoUDP */
7107         memset(&filter_replace, 0,
7108                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7109         memset(&filter_replace_buf, 0,
7110                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7111         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7112                 I40E_AQC_MIRROR_CLOUD_FILTER;
7113         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7114         filter_replace.new_filter_type =
7115                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7116         /* Prepare the buffer, 2 entries */
7117         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7118         filter_replace_buf.data[0] |=
7119                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7120         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7121         filter_replace_buf.data[4] |=
7122                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7123         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7124                                                &filter_replace_buf);
7125         if (status < 0)
7126                 return status;
7127
7128         /* For MPLSoGRE */
7129         memset(&filter_replace, 0,
7130                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7131         memset(&filter_replace_buf, 0,
7132                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7133
7134         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7135                 I40E_AQC_MIRROR_CLOUD_FILTER;
7136         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7137         filter_replace.new_filter_type =
7138                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7139         /* Prepare the buffer, 2 entries */
7140         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7141         filter_replace_buf.data[0] |=
7142                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7143         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7144         filter_replace_buf.data[4] |=
7145                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7146
7147         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7148                                                &filter_replace_buf);
7149         return status;
7150 }
7151
7152 static enum i40e_status_code
7153 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7154 {
7155         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7156         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7157         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7158         enum i40e_status_code status = I40E_SUCCESS;
7159
7160         /* For GTP-C */
7161         memset(&filter_replace, 0,
7162                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7163         memset(&filter_replace_buf, 0,
7164                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7165         /* create L1 filter */
7166         filter_replace.old_filter_type =
7167                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7168         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
7169         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
7170                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7171         /* Prepare the buffer, 2 entries */
7172         filter_replace_buf.data[0] =
7173                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7174         filter_replace_buf.data[0] |=
7175                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7176         filter_replace_buf.data[2] = 0xFF;
7177         filter_replace_buf.data[3] = 0xFF;
7178         filter_replace_buf.data[4] =
7179                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7180         filter_replace_buf.data[4] |=
7181                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7182         filter_replace_buf.data[6] = 0xFF;
7183         filter_replace_buf.data[7] = 0xFF;
7184         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7185                                                &filter_replace_buf);
7186         if (status < 0)
7187                 return status;
7188
7189         /* for GTP-U */
7190         memset(&filter_replace, 0,
7191                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7192         memset(&filter_replace_buf, 0,
7193                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7194         /* create L1 filter */
7195         filter_replace.old_filter_type =
7196                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
7197         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
7198         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
7199                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7200         /* Prepare the buffer, 2 entries */
7201         filter_replace_buf.data[0] =
7202                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7203         filter_replace_buf.data[0] |=
7204                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7205         filter_replace_buf.data[2] = 0xFF;
7206         filter_replace_buf.data[3] = 0xFF;
7207         filter_replace_buf.data[4] =
7208                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7209         filter_replace_buf.data[4] |=
7210                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7211         filter_replace_buf.data[6] = 0xFF;
7212         filter_replace_buf.data[7] = 0xFF;
7213
7214         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7215                                                &filter_replace_buf);
7216         return status;
7217 }
7218
7219 static enum i40e_status_code
7220 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
7221 {
7222         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7223         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7224         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7225         enum i40e_status_code status = I40E_SUCCESS;
7226
7227         /* for GTP-C */
7228         memset(&filter_replace, 0,
7229                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7230         memset(&filter_replace_buf, 0,
7231                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7232         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7233         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7234         filter_replace.new_filter_type =
7235                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7236         /* Prepare the buffer, 2 entries */
7237         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
7238         filter_replace_buf.data[0] |=
7239                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7240         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7241         filter_replace_buf.data[4] |=
7242                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7243         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7244                                                &filter_replace_buf);
7245         if (status < 0)
7246                 return status;
7247
7248         /* for GTP-U */
7249         memset(&filter_replace, 0,
7250                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7251         memset(&filter_replace_buf, 0,
7252                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7253         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7254         filter_replace.old_filter_type =
7255                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7256         filter_replace.new_filter_type =
7257                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7258         /* Prepare the buffer, 2 entries */
7259         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
7260         filter_replace_buf.data[0] |=
7261                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7262         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7263         filter_replace_buf.data[4] |=
7264                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7265
7266         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7267                                                &filter_replace_buf);
7268         return status;
7269 }
7270
7271 int
7272 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
7273                       struct i40e_tunnel_filter_conf *tunnel_filter,
7274                       uint8_t add)
7275 {
7276         uint16_t ip_type;
7277         uint32_t ipv4_addr, ipv4_addr_le;
7278         uint8_t i, tun_type = 0;
7279         /* internal variable to convert ipv6 byte order */
7280         uint32_t convert_ipv6[4];
7281         int val, ret = 0;
7282         struct i40e_pf_vf *vf = NULL;
7283         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7284         struct i40e_vsi *vsi;
7285         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7286         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7287         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7288         struct i40e_tunnel_filter *tunnel, *node;
7289         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7290         uint32_t teid_le;
7291         bool big_buffer = 0;
7292
7293         cld_filter = rte_zmalloc("tunnel_filter",
7294                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7295                          0);
7296
7297         if (cld_filter == NULL) {
7298                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7299                 return -ENOMEM;
7300         }
7301         pfilter = cld_filter;
7302
7303         ether_addr_copy(&tunnel_filter->outer_mac,
7304                         (struct ether_addr *)&pfilter->element.outer_mac);
7305         ether_addr_copy(&tunnel_filter->inner_mac,
7306                         (struct ether_addr *)&pfilter->element.inner_mac);
7307
7308         pfilter->element.inner_vlan =
7309                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7310         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
7311                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7312                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7313                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7314                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7315                                 &ipv4_addr_le,
7316                                 sizeof(pfilter->element.ipaddr.v4.data));
7317         } else {
7318                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7319                 for (i = 0; i < 4; i++) {
7320                         convert_ipv6[i] =
7321                         rte_cpu_to_le_32(rte_be_to_cpu_32(
7322                                          tunnel_filter->ip_addr.ipv6_addr[i]));
7323                 }
7324                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7325                            &convert_ipv6,
7326                            sizeof(pfilter->element.ipaddr.v6.data));
7327         }
7328
7329         /* check tunnel type */
7330         switch (tunnel_filter->tunnel_type) {
7331         case I40E_TUNNEL_TYPE_VXLAN:
7332                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7333                 break;
7334         case I40E_TUNNEL_TYPE_NVGRE:
7335                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7336                 break;
7337         case I40E_TUNNEL_TYPE_IP_IN_GRE:
7338                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7339                 break;
7340         case I40E_TUNNEL_TYPE_MPLSoUDP:
7341                 if (!pf->mpls_replace_flag) {
7342                         i40e_replace_mpls_l1_filter(pf);
7343                         i40e_replace_mpls_cloud_filter(pf);
7344                         pf->mpls_replace_flag = 1;
7345                 }
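                /*
                 * The 20-bit MPLS label travels in tenant_id: WORD0 of the
                 * field vector takes its upper 16 bits and WORD1 carries
                 * the remaining low 4 bits in its top nibble. WORD2 then
                 * tells MPLSoUDP (0x40) apart from MPLSoGRE (0x0).
                 */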
7346                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7347                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7348                         teid_le >> 4;
7349                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7350                         (teid_le & 0xF) << 12;
7351                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7352                         0x40;
7353                 big_buffer = 1;
7354                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
7355                 break;
7356         case I40E_TUNNEL_TYPE_MPLSoGRE:
7357                 if (!pf->mpls_replace_flag) {
7358                         i40e_replace_mpls_l1_filter(pf);
7359                         i40e_replace_mpls_cloud_filter(pf);
7360                         pf->mpls_replace_flag = 1;
7361                 }
7362                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7363                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7364                         teid_le >> 4;
7365                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7366                         (teid_le & 0xF) << 12;
7367                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7368                         0x0;
7369                 big_buffer = 1;
7370                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
7371                 break;
7372         case I40E_TUNNEL_TYPE_GTPC:
7373                 if (!pf->gtp_replace_flag) {
7374                         i40e_replace_gtp_l1_filter(pf);
7375                         i40e_replace_gtp_cloud_filter(pf);
7376                         pf->gtp_replace_flag = 1;
7377                 }
7378                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7379                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
7380                         (teid_le >> 16) & 0xFFFF;
7381                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
7382                         teid_le & 0xFFFF;
7383                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
7384                         0x0;
7385                 big_buffer = 1;
7386                 break;
7387         case I40E_TUNNEL_TYPE_GTPU:
7388                 if (!pf->gtp_replace_flag) {
7389                         i40e_replace_gtp_l1_filter(pf);
7390                         i40e_replace_gtp_cloud_filter(pf);
7391                         pf->gtp_replace_flag = 1;
7392                 }
7393                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7394                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
7395                         (teid_le >> 16) & 0xFFFF;
7396                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
7397                         teid_le & 0xFFFF;
7398                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
7399                         0x0;
7400                 big_buffer = 1;
7401                 break;
7402         case I40E_TUNNEL_TYPE_QINQ:
7403                 if (!pf->qinq_replace_flag) {
7404                         ret = i40e_cloud_filter_qinq_create(pf);
7405                         if (ret < 0)
7406                                 PMD_DRV_LOG(DEBUG,
7407                                             "QinQ tunnel filter already created.");
7408                         pf->qinq_replace_flag = 1;
7409                 }
7410                 /*
7411                  * Put the outer and inner VLAN IDs into the general
7412                  * fields. The big buffer flag must be set; see the
7413                  * handling in i40e_aq_add_cloud_filters.
7414                  */
7415                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
7416                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
7417                 big_buffer = 1;
7418                 break;
7419         default:
7420                 /* Other tunnel types are not supported. */
7421                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7422                 rte_free(cld_filter);
7423                 return -EINVAL;
7424         }
7425
7426         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
7427                 pfilter->element.flags =
7428                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7429         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
7430                 pfilter->element.flags =
7431                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7432         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
7433                 pfilter->element.flags =
7434                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7435         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
7436                 pfilter->element.flags =
7437                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7438         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
7439                 pfilter->element.flags |=
7440                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
7441         else {
7442                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7443                                                 &pfilter->element.flags);
7444                 if (val < 0) {
7445                         rte_free(cld_filter);
7446                         return -EINVAL;
7447                 }
7448         }
7449
7450         pfilter->element.flags |= rte_cpu_to_le_16(
7451                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7452                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7453         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7454         pfilter->element.queue_number =
7455                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7456
7457         if (!tunnel_filter->is_to_vf) {
7458                 vsi = pf->main_vsi;
7459         } else {
7460                 if (tunnel_filter->vf_id >= pf->vf_num) {
7461                         PMD_DRV_LOG(ERR, "Invalid argument.");
7462                         rte_free(cld_filter);
7463                         return -EINVAL;
7464                 }
7465                 vf = &pf->vfs[tunnel_filter->vf_id];
7466                 vsi = vf->vsi;
7467         }
7467
7468         /* Check if the filter exists in the SW list */
7469         memset(&check_filter, 0, sizeof(check_filter));
7470         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7471         check_filter.is_to_vf = tunnel_filter->is_to_vf;
7472         check_filter.vf_id = tunnel_filter->vf_id;
7473         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7474         if (add && node) {
7475                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7476                 rte_free(cld_filter);
7477                 return -EINVAL;
7478         }
7479
7480         if (!add && !node) {
7481                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7482                 rte_free(cld_filter);
7483                 return -EINVAL;
7484         }
7483
7484         if (add) {
7485                 if (big_buffer)
7486                         ret = i40e_aq_add_cloud_filters_big_buffer(hw,
7487                                                    vsi->seid, cld_filter, 1);
7488                 else
7489                         ret = i40e_aq_add_cloud_filters(hw,
7490                                         vsi->seid, &cld_filter->element, 1);
7491                 if (ret < 0) {
7492                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7493                         rte_free(cld_filter);
7494                         return -ENOTSUP;
7495                 }
7496                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7497                 if (tunnel == NULL) {
7498                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7499                         rte_free(cld_filter);
7500                         return -ENOMEM;
7501                 }
7502                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7503                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7504         } else {
7505                 if (big_buffer)
7506                         ret = i40e_aq_remove_cloud_filters_big_buffer(
7507                                 hw, vsi->seid, cld_filter, 1);
7508                 else
7509                         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7510                                                    &cld_filter->element, 1);
7511                 if (ret < 0) {
7512                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7513                         rte_free(cld_filter);
7514                         return -ENOTSUP;
7515                 }
7516                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7517         }
7511
7512         rte_free(cld_filter);
7513         return ret;
7514 }
7515
7516 static int
7517 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
7518 {
7519         uint8_t i;
7520
7521         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7522                 if (pf->vxlan_ports[i] == port)
7523                         return i;
7524         }
7525
7526         return -1;
7527 }
7528
7529 static int
7530 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
7531 {
7532         int  idx, ret;
7533         uint8_t filter_idx;
7534         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7535
7536         idx = i40e_get_vxlan_port_idx(pf, port);
7537
7538         /* Check if port already exists */
7539         if (idx >= 0) {
7540                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
7541                 return -EINVAL;
7542         }
7543
7544         /* Now check if there is space to add the new port */
7545         idx = i40e_get_vxlan_port_idx(pf, 0);
7546         if (idx < 0) {
7547                 PMD_DRV_LOG(ERR,
7548                         "Maximum number of UDP ports reached, not adding port %d",
7549                         port);
7550                 return -ENOSPC;
7551         }
7552
7553         ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
7554                                         &filter_idx, NULL);
7555         if (ret < 0) {
7556                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
7557                 return -1;
7558         }
7559
7560         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
7561                          port, filter_idx);
7562
7563         /* New port: add it and mark its index in the bitmap */
7564         pf->vxlan_ports[idx] = port;
7565         pf->vxlan_bitmap |= (1 << idx);
7566
7567         if (!(pf->flags & I40E_FLAG_VXLAN))
7568                 pf->flags |= I40E_FLAG_VXLAN;
7569
7570         return 0;
7571 }
7572
7573 static int
7574 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
7575 {
7576         int idx;
7577         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7578
7579         if (!(pf->flags & I40E_FLAG_VXLAN)) {
7580                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
7581                 return -EINVAL;
7582         }
7583
7584         idx = i40e_get_vxlan_port_idx(pf, port);
7585
7586         if (idx < 0) {
7587                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
7588                 return -EINVAL;
7589         }
7590
7591         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
7592                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
7593                 return -1;
7594         }
7595
7596         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
7597                         port, idx);
7598
7599         pf->vxlan_ports[idx] = 0;
7600         pf->vxlan_bitmap &= ~(1 << idx);
7601
7602         if (!pf->vxlan_bitmap)
7603                 pf->flags &= ~I40E_FLAG_VXLAN;
7604
7605         return 0;
7606 }
7607
7608 /* Add UDP tunneling port */
7609 static int
7610 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
7611                              struct rte_eth_udp_tunnel *udp_tunnel)
7612 {
7613         int ret = 0;
7614         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7615
7616         if (udp_tunnel == NULL)
7617                 return -EINVAL;
7618
7619         switch (udp_tunnel->prot_type) {
7620         case RTE_TUNNEL_TYPE_VXLAN:
7621                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
7622                 break;
7623
7624         case RTE_TUNNEL_TYPE_GENEVE:
7625         case RTE_TUNNEL_TYPE_TEREDO:
7626                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
7627                 ret = -1;
7628                 break;
7629
7630         default:
7631                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7632                 ret = -1;
7633                 break;
7634         }
7635
7636         return ret;
7637 }
7638
7639 /* Remove UDP tunneling port */
7640 static int
7641 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
7642                              struct rte_eth_udp_tunnel *udp_tunnel)
7643 {
7644         int ret = 0;
7645         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7646
7647         if (udp_tunnel == NULL)
7648                 return -EINVAL;
7649
7650         switch (udp_tunnel->prot_type) {
7651         case RTE_TUNNEL_TYPE_VXLAN:
7652                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
7653                 break;
7654         case RTE_TUNNEL_TYPE_GENEVE:
7655         case RTE_TUNNEL_TYPE_TEREDO:
7656                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
7657                 ret = -1;
7658                 break;
7659         default:
7660                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7661                 ret = -1;
7662                 break;
7663         }
7664
7665         return ret;
7666 }
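
/*
 * Usage sketch (illustrative): applications reach these callbacks through
 * the generic ethdev API, e.g. to offload the IANA-assigned VXLAN port:
 */
static inline int
i40e_vxlan_port_example(uint16_t port_id)
{
        struct rte_eth_udp_tunnel tunnel_udp = {
                .udp_port = 4789,               /* IANA VXLAN UDP port */
                .prot_type = RTE_TUNNEL_TYPE_VXLAN,
        };

        return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
}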
7667
7668 /* Calculate the maximum number of contiguous PF queues that are configured */
7669 static int
7670 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
7671 {
7672         struct rte_eth_dev_data *data = pf->dev_data;
7673         int i, num;
7674         struct i40e_rx_queue *rxq;
7675
7676         num = 0;
7677         for (i = 0; i < pf->lan_nb_qps; i++) {
7678                 rxq = data->rx_queues[i];
7679                 if (rxq && rxq->q_set)
7680                         num++;
7681                 else
7682                         break;
7683         }
7684
7685         return num;
7686 }
7687
7688 /* Configure RSS */
7689 static int
7690 i40e_pf_config_rss(struct i40e_pf *pf)
7691 {
7692         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7693         struct rte_eth_rss_conf rss_conf;
7694         uint32_t i, lut = 0;
7695         uint16_t j, num;
7696
7697         /*
7698          * If both VMDQ and RSS are enabled, not all PF queues are configured;
7699          * calculate the number of PF queues that are actually configured.
7700          */
7701         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
7702                 num = i40e_pf_calc_configured_queues_num(pf);
7703         else
7704                 num = pf->dev_data->nb_rx_queues;
7705
7706         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
7707         PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
7708                         num);
7709
7710         if (num == 0) {
7711                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
7712                 return -ENOTSUP;
7713         }
7714
7715         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
7716                 if (j == num)
7717                         j = 0;
7718                 lut = (lut << 8) | (j & ((0x1 <<
7719                         hw->func_caps.rss_table_entry_width) - 1));
7720                 if ((i & 3) == 3)
7721                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
7722         }
7723
7724         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
7725         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
7726                 i40e_pf_disable_rss(pf);
7727                 return 0;
7728         }
7729         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
7730                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
7731                 /* Random default keys */
7732                 static uint32_t rss_key_default[] = {0x6b793944,
7733                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
7734                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
7735                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
7736
7737                 rss_conf.rss_key = (uint8_t *)rss_key_default;
7738                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7739                                                         sizeof(uint32_t);
7740         }
7741
7742         return i40e_hw_rss_hash_set(pf, &rss_conf);
7743 }
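
/*
 * Note on the LUT fill above: each 32-bit I40E_PFQF_HLUT register packs
 * four 8-bit queue indices, so the loop shifts an index in per iteration
 * and flushes the accumulated word every fourth pass ((i & 3) == 3). With
 * num = 4 queues, for example, every written word is 0x00010203.
 */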
7744
7745 static int
7746 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
7747                                struct rte_eth_tunnel_filter_conf *filter)
7748 {
7749         if (pf == NULL || filter == NULL) {
7750                 PMD_DRV_LOG(ERR, "Invalid parameter");
7751                 return -EINVAL;
7752         }
7753
7754         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
7755                 PMD_DRV_LOG(ERR, "Invalid queue ID");
7756                 return -EINVAL;
7757         }
7758
7759         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
7760                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
7761                 return -EINVAL;
7762         }
7763
7764         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
7765                 (is_zero_ether_addr(&filter->outer_mac))) {
7766                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
7767                 return -EINVAL;
7768         }
7769
7770         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
7771                 (is_zero_ether_addr(&filter->inner_mac))) {
7772                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
7773                 return -EINVAL;
7774         }
7775
7776         return 0;
7777 }
7778
7779 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
7780 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
7781 static int
7782 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
7783 {
7784         uint32_t val, reg;
7785         int ret = -EINVAL;
7786
7787         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
7788         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
7789
7790         if (len == 3) {
7791                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
7792         } else if (len == 4) {
7793                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
7794         } else {
7795                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
7796                 return ret;
7797         }
7798
7799         if (reg != val) {
7800                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
7801                                                    reg, NULL);
7802                 if (ret != 0)
7803                         return ret;
7804         } else {
7805                 ret = 0;
7806         }
7807         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
7808                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
7809
7810         return ret;
7811 }
7812
7813 static int
7814 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
7815 {
7816         int ret = -EINVAL;
7817
7818         if (!hw || !cfg)
7819                 return -EINVAL;
7820
7821         switch (cfg->cfg_type) {
7822         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
7823                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
7824                 break;
7825         default:
7826                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
7827                 break;
7828         }
7829
7830         return ret;
7831 }
7832
7833 static int
7834 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
7835                                enum rte_filter_op filter_op,
7836                                void *arg)
7837 {
7838         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7839         int ret = I40E_ERR_PARAM;
7840
7841         switch (filter_op) {
7842         case RTE_ETH_FILTER_SET:
7843                 ret = i40e_dev_global_config_set(hw,
7844                         (struct rte_eth_global_cfg *)arg);
7845                 break;
7846         default:
7847                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
7848                 break;
7849         }
7850
7851         return ret;
7852 }
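
/*
 * Usage sketch (illustrative): selecting a 3-byte GRE key for the parser
 * goes through the filter-ctrl path above and lands in
 * i40e_dev_set_gre_key_len():
 */
static inline int
i40e_gre_key_len_example(struct rte_eth_dev *dev)
{
        struct rte_eth_global_cfg cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN;
        cfg.cfg.gre_key_len = 3;        /* only 3 or 4 are accepted */

        return i40e_filter_ctrl_global_config(dev, RTE_ETH_FILTER_SET, &cfg);
}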
7853
7854 static int
7855 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
7856                           enum rte_filter_op filter_op,
7857                           void *arg)
7858 {
7859         struct rte_eth_tunnel_filter_conf *filter;
7860         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7861         int ret = I40E_SUCCESS;
7862
7863         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
7864
7865         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
7866                 return I40E_ERR_PARAM;
7867
7868         switch (filter_op) {
7869         case RTE_ETH_FILTER_NOP:
7870                 if (!(pf->flags & I40E_FLAG_VXLAN))
7871                         ret = I40E_NOT_SUPPORTED;
7872                 break;
7873         case RTE_ETH_FILTER_ADD:
7874                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
7875                 break;
7876         case RTE_ETH_FILTER_DELETE:
7877                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
7878                 break;
7879         default:
7880                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
7881                 ret = I40E_ERR_PARAM;
7882                 break;
7883         }
7884
7885         return ret;
7886 }
7887
7888 static int
7889 i40e_pf_config_mq_rx(struct i40e_pf *pf)
7890 {
7891         int ret = 0;
7892         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
7893
7894         /* RSS setup */
7895         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
7896                 ret = i40e_pf_config_rss(pf);
7897         else
7898                 i40e_pf_disable_rss(pf);
7899
7900         return ret;
7901 }
7902
7903 /* Get the symmetric hash enable configurations per port */
7904 static void
7905 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
7906 {
7907         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
7908
7909         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
7910 }
7911
7912 /* Set the symmetric hash enable configurations per port */
7913 static void
7914 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
7915 {
7916         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
7917
7918         if (enable > 0) {
7919                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
7920                         PMD_DRV_LOG(INFO,
7921                                 "Symmetric hash has already been enabled");
7922                         return;
7923                 }
7924                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
7925         } else {
7926                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
7927                         PMD_DRV_LOG(INFO,
7928                                 "Symmetric hash has already been disabled");
7929                         return;
7930                 }
7931                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
7932         }
7933         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
7934         I40E_WRITE_FLUSH(hw);
7935 }
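
/*
 * Note: with symmetric hashing enabled, the hardware folds source and
 * destination fields together before hashing, so both directions of a flow
 * land on the same RX queue. The per-port bit above gates the per-pctype
 * I40E_GLQF_HSYM enables used in the global configuration below.
 */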
7936
7937 /*
7938  * Get global configurations of hash function type and symmetric hash enable
7939  * per flow type (pctype). Note that global configuration means it affects all
7940  * the ports on the same NIC.
7941  */
7942 static int
7943 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
7944                                    struct rte_eth_hash_global_conf *g_cfg)
7945 {
7946         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
7947         uint32_t reg;
7948         uint16_t i, j;
7949
7950         memset(g_cfg, 0, sizeof(*g_cfg));
7951         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
7952         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
7953                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
7954         else
7955                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
7956         PMD_DRV_LOG(DEBUG, "Hash function is %s",
7957                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
7958
7959         /*
7960          * We only use the lowest 32 bits, which is not correct. To work
7961          * properly, the valid_bit_mask size should be increased to 64 bits,
7962          * but that would break ABI. This modification will be done in a
7963          * future release.
7964          */
7965         g_cfg->valid_bit_mask[0] = (uint32_t)adapter->flow_types_mask;
7966
7967         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT32_BIT; i++) {
7968                 if (!adapter->pctypes_tbl[i])
7969                         continue;
7970                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
7971                      j < I40E_FILTER_PCTYPE_MAX; j++) {
7972                         if (adapter->pctypes_tbl[i] & (1ULL << j)) {
7973                                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
7974                                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
7975                                         g_cfg->sym_hash_enable_mask[0] |=
7976                                                                 (1UL << i);
7977                                 }
7978                         }
7979                 }
7980         }
7981
7982         return 0;
7983 }
7984
7985 static int
7986 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
7987                               const struct rte_eth_hash_global_conf *g_cfg)
7988 {
7989         uint32_t i;
7990         uint32_t mask0, i40e_mask = adapter->flow_types_mask;
7991
7992         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
7993                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
7994                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
7995                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
7996                                                 g_cfg->hash_func);
7997                 return -EINVAL;
7998         }
7999
8000         /*
8001          * As i40e supports less than 32 flow types, only first 32 bits need to
8002          * As i40e supports fewer than 32 flow types, only the first 32 bits
8003          * need to be checked.
8004         mask0 = g_cfg->valid_bit_mask[0];
8005         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8006                 if (i == 0) {
8007                         /* Check if any unsupported flow type is configured */
8008                         if ((mask0 | i40e_mask) ^ i40e_mask)
8009                                 goto mask_err;
8010                 } else {
8011                         if (g_cfg->valid_bit_mask[i])
8012                                 goto mask_err;
8013                 }
8014         }
8015
8016         return 0;
8017
8018 mask_err:
8019         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
8020
8021         return -EINVAL;
8022 }
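
/*
 * Editor's note: the unsupported-bit test above uses the identity
 * (mask0 | supported) ^ supported == mask0 & ~supported, which is non-zero
 * exactly when mask0 carries a bit outside the supported set. A
 * self-contained illustration of the idiom:
 */
static __rte_unused int
i40e_example_has_unsupported_bits(uint32_t requested, uint32_t supported)
{
        /* Non-zero iff 'requested' sets a bit that 'supported' does not */
        return ((requested | supported) ^ supported) != 0;
}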
8023
8024 /*
8025  * Set global configurations of hash function type and symmetric hash enable
8026  * per flow type (pctype). Note that modifying the global configuration will
8027  * affect all the ports on the same NIC.
8028  */
8029 static int
8030 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
8031                                    struct rte_eth_hash_global_conf *g_cfg)
8032 {
8033         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8034         int ret;
8035         uint16_t i, j;
8036         uint32_t reg;
8037         /*
8038          * We only use the lowest 32 bits, which is not correct. To work
8039          * properly, the valid_bit_mask size should be increased to 64 bits,
8040          * but that would break ABI. This modification will be done in a
8041          * future release.
8042          */
8043         uint32_t mask0 = g_cfg->valid_bit_mask[0] &
8044                                         (uint32_t)adapter->flow_types_mask;
8045
8046         /* Check the input parameters */
8047         ret = i40e_hash_global_config_check(adapter, g_cfg);
8048         if (ret < 0)
8049                 return ret;
8050
8051         for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT32_BIT; i++) {
8052                 if (mask0 & (1UL << i)) {
8053                         reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
8054                                         I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
8055
8056                         for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8057                              j < I40E_FILTER_PCTYPE_MAX; j++) {
8058                                 if (adapter->pctypes_tbl[i] & (1ULL << j))
8059                                         i40e_write_rx_ctl(hw,
8060                                                           I40E_GLQF_HSYM(j),
8061                                                           reg);
8062                         }
8063                 }
8064         }
8065
8066         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8067         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
8068                 /* Toeplitz */
8069                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
8070                         PMD_DRV_LOG(DEBUG,
8071                                 "Hash function already set to Toeplitz");
8072                         goto out;
8073                 }
8074                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
8075         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
8076                 /* Simple XOR */
8077                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
8078                         PMD_DRV_LOG(DEBUG,
8079                                 "Hash function already set to Simple XOR");
8080                         goto out;
8081                 }
8082                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
8083         } else
8084                 /* Use the default, and keep it as it is */
8085                 goto out;
8086
8087         i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
8088
8089 out:
8090         I40E_WRITE_FLUSH(hw);
8091
8092         return 0;
8093 }
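
/*
 * Editor's sketch (illustrative only): switching the NIC-global hash function
 * to Toeplitz and enabling symmetric hashing for IPv4/TCP through the same
 * filter-control path. As noted above, this affects every port on the NIC
 * and only the lowest 32 bits of the masks are honored.
 */
static __rte_unused int
i40e_example_global_sym_toeplitz(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
        info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
        info.info.global_conf.valid_bit_mask[0] =
                1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
        info.info.global_conf.sym_hash_enable_mask[0] =
                1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}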
8094
8095 /**
8096  * Valid input sets for hash and flow director filters per PCTYPE
8097  */
8098 static uint64_t
8099 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
8100                 enum rte_filter_type filter)
8101 {
8102         uint64_t valid;
8103
8104         static const uint64_t valid_hash_inset_table[] = {
8105                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8106                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8107                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8108                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
8109                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
8110                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8111                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8112                         I40E_INSET_FLEX_PAYLOAD,
8113                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8114                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8115                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8116                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8117                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8118                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8119                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8120                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8121                         I40E_INSET_FLEX_PAYLOAD,
8122                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8123                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8124                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8125                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8126                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8127                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8128                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8129                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8130                         I40E_INSET_FLEX_PAYLOAD,
8131                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8132                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8133                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8134                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8135                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8136                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8137                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8138                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8139                         I40E_INSET_FLEX_PAYLOAD,
8140                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8141                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8142                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8143                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8144                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8145                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8146                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8147                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8148                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8149                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8150                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8151                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8152                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8153                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8154                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8155                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8156                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8157                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8158                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8159                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8160                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8161                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8162                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8163                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8164                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8165                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8166                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
8167                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8168                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8169                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8170                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8171                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8172                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8173                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8174                         I40E_INSET_FLEX_PAYLOAD,
8175                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8176                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8177                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8178                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8179                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8180                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
8181                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
8182                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
8183                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8184                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8185                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8186                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8187                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8188                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8189                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8190                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
8191                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8192                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8193                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8194                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8195                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8196                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8197                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8198                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8199                         I40E_INSET_FLEX_PAYLOAD,
8200                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8201                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8202                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8203                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8204                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8205                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8206                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8207                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8208                         I40E_INSET_FLEX_PAYLOAD,
8209                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8210                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8211                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8212                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8213                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8214                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8215                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8216                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8217                         I40E_INSET_FLEX_PAYLOAD,
8218                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8219                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8220                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8221                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8222                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8223                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8224                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8225                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8226                         I40E_INSET_FLEX_PAYLOAD,
8227                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8228                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8229                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8230                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8231                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8232                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8233                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8234                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
8235                         I40E_INSET_FLEX_PAYLOAD,
8236                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8237                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8238                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8239                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8240                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8241                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8242                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
8243                         I40E_INSET_FLEX_PAYLOAD,
8244                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8245                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8246                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8247                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
8248                         I40E_INSET_FLEX_PAYLOAD,
8249         };
8250
8251         /**
8252          * Flow director supports only fields defined in
8253          * union rte_eth_fdir_flow.
8254          */
8255         static const uint64_t valid_fdir_inset_table[] = {
8256                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8257                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8258                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8259                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8260                 I40E_INSET_IPV4_TTL,
8261                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8262                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8263                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8264                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8265                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8266                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8267                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8268                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8269                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8270                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8271                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8272                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8273                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8274                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8275                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8276                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8277                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8278                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8279                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8280                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8281                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8282                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8283                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8284                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8285                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8286                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8287                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8288                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8289                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8290                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8291                 I40E_INSET_SCTP_VT,
8292                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8293                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8294                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8295                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8296                 I40E_INSET_IPV4_TTL,
8297                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8298                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8299                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8300                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8301                 I40E_INSET_IPV6_HOP_LIMIT,
8302                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8303                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8304                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8305                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8306                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8307                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8308                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8309                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8310                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8311                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8312                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8313                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8314                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8315                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8316                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8317                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8318                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8319                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8320                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8321                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8322                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8323                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8324                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8325                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8326                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8327                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8328                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8329                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8330                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8331                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8332                 I40E_INSET_SCTP_VT,
8333                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8334                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8335                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8336                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8337                 I40E_INSET_IPV6_HOP_LIMIT,
8338                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8339                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8340                 I40E_INSET_LAST_ETHER_TYPE,
8341         };
8342
8343         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8344                 return 0;
8345         if (filter == RTE_ETH_FILTER_HASH)
8346                 valid = valid_hash_inset_table[pctype];
8347         else
8348                 valid = valid_fdir_inset_table[pctype];
8349
8350         return valid;
8351 }
8352
8353 /**
8354  * Validate if the input set is allowed for a specific PCTYPE
8355  */
8356 int
8357 i40e_validate_input_set(enum i40e_filter_pctype pctype,
8358                 enum rte_filter_type filter, uint64_t inset)
8359 {
8360         uint64_t valid;
8361
8362         valid = i40e_get_valid_input_set(pctype, filter);
8363         if (inset & (~valid))
8364                 return -EINVAL;
8365
8366         return 0;
8367 }
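
/*
 * Editor's note: i40e_validate_input_set() rejects only bits missing from
 * the relevant table. For instance, I40E_INSET_TCP_FLAGS is valid for a
 * hash filter on IPv4/TCP but is absent from the flow director table, so
 * the same request fails there:
 */
static __rte_unused void
i40e_example_validate_inset(void)
{
        /* Accepted (returns 0) for hash filters ... */
        int hash_ret = i40e_validate_input_set(
                        I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
                        RTE_ETH_FILTER_HASH, I40E_INSET_TCP_FLAGS);
        /* ... rejected (returns -EINVAL) for flow director */
        int fdir_ret = i40e_validate_input_set(
                        I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
                        RTE_ETH_FILTER_FDIR, I40E_INSET_TCP_FLAGS);

        RTE_SET_USED(hash_ret);
        RTE_SET_USED(fdir_ret);
}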
8368
8369 /* default input set fields combination per pctype */
8370 uint64_t
8371 i40e_get_default_input_set(uint16_t pctype)
8372 {
8373         static const uint64_t default_inset_table[] = {
8374                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8375                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8376                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8377                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8378                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8379                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8380                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8381                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8382                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8383                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8384                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8385                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8386                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8387                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8388                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8389                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8390                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8391                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8392                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8393                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8394                         I40E_INSET_SCTP_VT,
8395                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8396                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8397                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8398                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8399                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8400                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8401                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8402                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8403                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8404                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8405                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8406                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8407                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8408                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8409                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8410                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8411                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8412                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8413                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8414                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8415                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8416                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8417                         I40E_INSET_SCTP_VT,
8418                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8419                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8420                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8421                         I40E_INSET_LAST_ETHER_TYPE,
8422         };
8423
8424         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8425                 return 0;
8426
8427         return default_inset_table[pctype];
8428 }
8429
8430 /**
8431  * Parse the input set from index to logical bit masks
8432  */
8433 static int
8434 i40e_parse_input_set(uint64_t *inset,
8435                      enum i40e_filter_pctype pctype,
8436                      enum rte_eth_input_set_field *field,
8437                      uint16_t size)
8438 {
8439         uint16_t i, j;
8440         int ret = -EINVAL;
8441
8442         static const struct {
8443                 enum rte_eth_input_set_field field;
8444                 uint64_t inset;
8445         } inset_convert_table[] = {
8446                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
8447                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
8448                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
8449                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
8450                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
8451                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
8452                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
8453                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
8454                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
8455                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
8456                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
8457                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
8458                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
8459                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
8460                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
8461                         I40E_INSET_IPV6_NEXT_HDR},
8462                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
8463                         I40E_INSET_IPV6_HOP_LIMIT},
8464                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
8465                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
8466                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
8467                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
8468                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
8469                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
8470                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
8471                         I40E_INSET_SCTP_VT},
8472                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
8473                         I40E_INSET_TUNNEL_DMAC},
8474                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
8475                         I40E_INSET_VLAN_TUNNEL},
8476                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
8477                         I40E_INSET_TUNNEL_ID},
8478                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
8479                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
8480                         I40E_INSET_FLEX_PAYLOAD_W1},
8481                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
8482                         I40E_INSET_FLEX_PAYLOAD_W2},
8483                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
8484                         I40E_INSET_FLEX_PAYLOAD_W3},
8485                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
8486                         I40E_INSET_FLEX_PAYLOAD_W4},
8487                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
8488                         I40E_INSET_FLEX_PAYLOAD_W5},
8489                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
8490                         I40E_INSET_FLEX_PAYLOAD_W6},
8491                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
8492                         I40E_INSET_FLEX_PAYLOAD_W7},
8493                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
8494                         I40E_INSET_FLEX_PAYLOAD_W8},
8495         };
8496
8497         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
8498                 return ret;
8499
8500         /* Only one item is allowed for DEFAULT or NONE */
8501         if (size == 1) {
8502                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
8503                         *inset = i40e_get_default_input_set(pctype);
8504                         return 0;
8505                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
8506                         *inset = I40E_INSET_NONE;
8507                         return 0;
8508                 }
8509         }
8510
8511         for (i = 0, *inset = 0; i < size; i++) {
8512                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
8513                         if (field[i] == inset_convert_table[j].field) {
8514                                 *inset |= inset_convert_table[j].inset;
8515                                 break;
8516                         }
8517                 }
8518
8519                 /* Contains an unsupported input set field; return immediately */
8520                 if (j == RTE_DIM(inset_convert_table))
8521                         return ret;
8522         }
8523
8524         return 0;
8525 }
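
/*
 * Editor's sketch: a hypothetical caller of the static parser above. A
 * two-field list selecting the IPv4 source and destination addresses is
 * expected to yield I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST.
 */
static __rte_unused int
i40e_example_parse_ipv4_pair(uint64_t *inset)
{
        enum rte_eth_input_set_field fields[] = {
                RTE_ETH_INPUT_SET_L3_SRC_IP4,
                RTE_ETH_INPUT_SET_L3_DST_IP4,
        };

        return i40e_parse_input_set(inset,
                                    I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
                                    fields, RTE_DIM(fields));
}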
8526
8527 /**
8528  * Translate the input set from logical bit masks to register-aware bit
8529  * masks
8530  */
8531 uint64_t
8532 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
8533 {
8534         uint64_t val = 0;
8535         uint16_t i;
8536
8537         struct inset_map {
8538                 uint64_t inset;
8539                 uint64_t inset_reg;
8540         };
8541
8542         static const struct inset_map inset_map_common[] = {
8543                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
8544                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
8545                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
8546                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
8547                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
8548                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
8549                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
8550                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
8551                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
8552                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
8553                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
8554                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
8555                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
8556                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
8557                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
8558                 {I40E_INSET_TUNNEL_DMAC,
8559                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
8560                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
8561                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
8562                 {I40E_INSET_TUNNEL_SRC_PORT,
8563                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
8564                 {I40E_INSET_TUNNEL_DST_PORT,
8565                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
8566                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
8567                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
8568                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
8569                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
8570                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
8571                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
8572                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
8573                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
8574                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
8575         };
8576
8577         /* Some registers map differently on the X722 */
8578         static const struct inset_map inset_map_diff_x722[] = {
8579                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
8580                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
8581                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
8582                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
8583         };
8584
8585         static const struct inset_map inset_map_diff_not_x722[] = {
8586                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
8587                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
8588                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
8589                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
8590         };
8591
8592         if (input == 0)
8593                 return val;
8594
8595         /* Translate input set to register aware inset */
8596         if (type == I40E_MAC_X722) {
8597                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
8598                         if (input & inset_map_diff_x722[i].inset)
8599                                 val |= inset_map_diff_x722[i].inset_reg;
8600                 }
8601         } else {
8602                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
8603                         if (input & inset_map_diff_not_x722[i].inset)
8604                                 val |= inset_map_diff_not_x722[i].inset_reg;
8605                 }
8606         }
8607
8608         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
8609                 if (input & inset_map_common[i].inset)
8610                         val |= inset_map_common[i].inset_reg;
8611         }
8612
8613         return val;
8614 }
8615
8616 int
8617 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
8618 {
8619         uint8_t i, idx = 0;
8620         uint64_t inset_need_mask = inset;
8621
8622         static const struct {
8623                 uint64_t inset;
8624                 uint32_t mask;
8625         } inset_mask_map[] = {
8626                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
8627                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
8628                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
8629                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
8630                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
8631                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
8632                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
8633                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
8634         };
8635
8636         if (!inset || !mask || !nb_elem)
8637                 return 0;
8638
8639         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
8640                 /* Clear the inset bit, if no MASK is required,
8641                  * for example proto + ttl
8642                  */
8643                 if ((inset & inset_mask_map[i].inset) ==
8644                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
8645                         inset_need_mask &= ~inset_mask_map[i].inset;
8646                 if (!inset_need_mask)
8647                         return 0;
8648         }
8649         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
8650                 if ((inset_need_mask & inset_mask_map[i].inset) ==
8651                     inset_mask_map[i].inset) {
8652                         if (idx >= nb_elem) {
8653                                 PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
8654                                 return -EINVAL;
8655                         }
8656                         mask[idx] = inset_mask_map[i].mask;
8657                         idx++;
8658                 }
8659         }
8660
8661         return idx;
8662 }
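
/*
 * Editor's note: i40e_generate_inset_mask_reg() first clears inset bits
 * that need no field mask (e.g. proto and ttl requested together), then
 * emits one 32-bit mask register value per remaining maskable field. A
 * minimal sketch: a TOS-only input set should yield a single mask entry.
 */
static __rte_unused void
i40e_example_inset_masks(void)
{
        uint32_t mask[I40E_INSET_MASK_NUM_REG] = {0};
        int num = i40e_generate_inset_mask_reg(I40E_INSET_IPV4_TOS, mask,
                                               I40E_INSET_MASK_NUM_REG);

        /* Expect num == 1 and mask[0] == I40E_INSET_IPV4_TOS_MASK */
        RTE_SET_USED(num);
}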
8663
8664 void
8665 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
8666 {
8667         uint32_t reg = i40e_read_rx_ctl(hw, addr);
8668
8669         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
8670         if (reg != val)
8671                 i40e_write_rx_ctl(hw, addr, val);
8672         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
8673                     (uint32_t)i40e_read_rx_ctl(hw, addr));
8674 }
8675
8676 static void
8677 i40e_filter_input_set_init(struct i40e_pf *pf)
8678 {
8679         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8680         enum i40e_filter_pctype pctype;
8681         uint64_t input_set, inset_reg;
8682         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8683         int num, i;
8684         uint16_t flow_type;
8685
8686         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
8687              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
8688                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
8689
8690                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
8691                         continue;
8692
8693                 input_set = i40e_get_default_input_set(pctype);
8694
8695                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8696                                                    I40E_INSET_MASK_NUM_REG);
8697                 if (num < 0)
8698                         return;
8699                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
8700                                         input_set);
8701
8702                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
8703                                       (uint32_t)(inset_reg & UINT32_MAX));
8704                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
8705                                      (uint32_t)((inset_reg >>
8706                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
8707                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
8708                                       (uint32_t)(inset_reg & UINT32_MAX));
8709                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
8710                                      (uint32_t)((inset_reg >>
8711                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
8712
8713                 for (i = 0; i < num; i++) {
8714                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8715                                              mask_reg[i]);
8716                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8717                                              mask_reg[i]);
8718                 }
8719                 /* Clear unused mask registers of the pctype */
8720                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
8721                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8722                                              0);
8723                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8724                                              0);
8725                 }
8726                 I40E_WRITE_FLUSH(hw);
8727
8728                 /* store the default input set */
8729                 pf->hash_input_set[pctype] = input_set;
8730                 pf->fdir.input_set[pctype] = input_set;
8731         }
8732 }
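
/*
 * Editor's note: in i40e_filter_input_set_init() above, each 64-bit inset
 * value is split across two 32-bit registers (index 0 holds the low word,
 * index 1 the high word), hence the UINT32_MAX masking and the
 * I40E_32_BIT_WIDTH shift. The split, factored out for clarity:
 */
static __rte_unused void
i40e_example_split_inset(uint64_t inset_reg, uint32_t *lo, uint32_t *hi)
{
        *lo = (uint32_t)(inset_reg & UINT32_MAX);
        *hi = (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) & UINT32_MAX);
}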
8733
8734 int
8735 i40e_hash_filter_inset_select(struct i40e_hw *hw,
8736                          struct rte_eth_input_set_conf *conf)
8737 {
8738         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8739         enum i40e_filter_pctype pctype;
8740         uint64_t input_set, inset_reg = 0;
8741         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8742         int ret, i, num;
8743
8744         if (!conf) {
8745                 PMD_DRV_LOG(ERR, "Invalid pointer");
8746                 return -EFAULT;
8747         }
8748         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
8749             conf->op != RTE_ETH_INPUT_SET_ADD) {
8750                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
8751                 return -EINVAL;
8752         }
8753
8754         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
8755         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
8756                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
8757                 return -EINVAL;
8758         }
8759
8760         if (hw->mac.type == I40E_MAC_X722) {
8761                 /* get translated pctype value in fd pctype register */
8762                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
8763                         I40E_GLQF_FD_PCTYPES((int)pctype));
8764         }
8765
8766         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
8767                                    conf->inset_size);
8768         if (ret) {
8769                 PMD_DRV_LOG(ERR, "Failed to parse input set");
8770                 return -EINVAL;
8771         }
8772
8773         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
8774                 /* get inset value in register */
8775                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
8776                 inset_reg <<= I40E_32_BIT_WIDTH;
8777                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
8778                 input_set |= pf->hash_input_set[pctype];
8779         }
8780         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8781                                            I40E_INSET_MASK_NUM_REG);
8782         if (num < 0)
8783                 return -EINVAL;
8784
8785         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
8786
8787         i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
8788                               (uint32_t)(inset_reg & UINT32_MAX));
8789         i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
8790                              (uint32_t)((inset_reg >>
8791                              I40E_32_BIT_WIDTH) & UINT32_MAX));
8792
8793         for (i = 0; i < num; i++)
8794                 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8795                                      mask_reg[i]);
8796         /* Clear unused mask registers of the pctype */
8797         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
8798                 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8799                                      0);
8800         I40E_WRITE_FLUSH(hw);
8801
8802         pf->hash_input_set[pctype] = input_set;
8803         return 0;
8804 }
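
/*
 * Editor's sketch (illustrative only): selecting the IPv4 source/destination
 * pair as the hash input set for IPv4/UDP from the application side. With
 * RTE_ETH_INPUT_SET_SELECT the previous input set is replaced; with
 * RTE_ETH_INPUT_SET_ADD it is extended.
 */
static __rte_unused int
i40e_example_select_hash_inset(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
        info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        info.info.input_set_conf.inset_size = 2;
        info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
        info.info.input_set_conf.field[1] = RTE_ETH_INPUT_SET_L3_DST_IP4;
        info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}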
8805
8806 int
8807 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
8808                          struct rte_eth_input_set_conf *conf)
8809 {
8810         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8811         enum i40e_filter_pctype pctype;
8812         uint64_t input_set, inset_reg = 0;
8813         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8814         int ret, i, num;
8815
8816         if (!hw || !conf) {
8817                 PMD_DRV_LOG(ERR, "Invalid pointer");
8818                 return -EFAULT;
8819         }
8820         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
8821             conf->op != RTE_ETH_INPUT_SET_ADD) {
8822                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
8823                 return -EINVAL;
8824         }
8825
8826         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
8827
8828         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
8829                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
8830                 return -EINVAL;
8831         }
8832
8833         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
8834                                    conf->inset_size);
8835         if (ret) {
8836                 PMD_DRV_LOG(ERR, "Failed to parse input set");
8837                 return -EINVAL;
8838         }
8839
8840         /* get inset value in register */
8841         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
8842         inset_reg <<= I40E_32_BIT_WIDTH;
8843         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
8844
8845         /* The inset reg for flex payload cannot be changed for fdir;
8846          * it is configured by writing I40E_PRTQF_FD_FLXINSET
8847          * in i40e_set_flex_mask_on_pctype.
8848          */
8849         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
8850                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
8851         else
8852                 input_set |= pf->fdir.input_set[pctype];
8853         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8854                                            I40E_INSET_MASK_NUM_REG);
8855         if (num < 0)
8856                 return -EINVAL;
8857
8858         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
8859
8860         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
8861                               (uint32_t)(inset_reg & UINT32_MAX));
8862         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
8863                              (uint32_t)((inset_reg >>
8864                              I40E_32_BIT_WIDTH) & UINT32_MAX));
8865
8866         for (i = 0; i < num; i++)
8867                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8868                                      mask_reg[i]);
8869         /* Clear unused mask registers of the pctype */
8870         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
8871                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8872                                      0);
8873         I40E_WRITE_FLUSH(hw);
8874
8875         pf->fdir.input_set[pctype] = input_set;
8876         return 0;
8877 }
8878
8879 static int
8880 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
8881 {
8882         int ret = 0;
8883
8884         if (!hw || !info) {
8885                 PMD_DRV_LOG(ERR, "Invalid pointer");
8886                 return -EFAULT;
8887         }
8888
8889         switch (info->info_type) {
8890         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
8891                 i40e_get_symmetric_hash_enable_per_port(hw,
8892                                         &(info->info.enable));
8893                 break;
8894         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
8895                 ret = i40e_get_hash_filter_global_config(hw,
8896                                 &(info->info.global_conf));
8897                 break;
8898         default:
8899                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
8900                                                         info->info_type);
8901                 ret = -EINVAL;
8902                 break;
8903         }
8904
8905         return ret;
8906 }
8907
8908 static int
8909 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
8910 {
8911         int ret = 0;
8912
8913         if (!hw || !info) {
8914                 PMD_DRV_LOG(ERR, "Invalid pointer");
8915                 return -EFAULT;
8916         }
8917
8918         switch (info->info_type) {
8919         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
8920                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
8921                 break;
8922         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
8923                 ret = i40e_set_hash_filter_global_config(hw,
8924                                 &(info->info.global_conf));
8925                 break;
8926         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
8927                 ret = i40e_hash_filter_inset_select(hw,
8928                                                &(info->info.input_set_conf));
8929                 break;
8930
8931         default:
8932                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
8933                                                         info->info_type);
8934                 ret = -EINVAL;
8935                 break;
8936         }
8937
8938         return ret;
8939 }
8940
8941 /* Operations for hash function */
8942 static int
8943 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
8944                       enum rte_filter_op filter_op,
8945                       void *arg)
8946 {
8947         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8948         int ret = 0;
8949
8950         switch (filter_op) {
8951         case RTE_ETH_FILTER_NOP:
8952                 break;
8953         case RTE_ETH_FILTER_GET:
8954                 ret = i40e_hash_filter_get(hw,
8955                         (struct rte_eth_hash_filter_info *)arg);
8956                 break;
8957         case RTE_ETH_FILTER_SET:
8958                 ret = i40e_hash_filter_set(hw,
8959                         (struct rte_eth_hash_filter_info *)arg);
8960                 break;
8961         default:
8962                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
8963                                                                 filter_op);
8964                 ret = -ENOTSUP;
8965                 break;
8966         }
8967
8968         return ret;
8969 }
8970
8971 /* Convert ethertype filter structure */
8972 static int
8973 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
8974                               struct i40e_ethertype_filter *filter)
8975 {
8976         rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
8977         filter->input.ether_type = input->ether_type;
8978         filter->flags = input->flags;
8979         filter->queue = input->queue;
8980
8981         return 0;
8982 }
8983
8984 /* Check if the ethertype filter exists in the SW list */
8985 struct i40e_ethertype_filter *
8986 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
8987                                 const struct i40e_ethertype_filter_input *input)
8988 {
8989         int ret;
8990
8991         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
8992         if (ret < 0)
8993                 return NULL;
8994
8995         return ethertype_rule->hash_map[ret];
8996 }
8997
8998 /* Add an ethertype filter to the SW list */
8999 static int
9000 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9001                                 struct i40e_ethertype_filter *filter)
9002 {
9003         struct i40e_ethertype_rule *rule = &pf->ethertype;
9004         int ret;
9005
9006         ret = rte_hash_add_key(rule->hash_table, &filter->input);
9007         if (ret < 0) {
9008                 PMD_DRV_LOG(ERR,
9009                             "Failed to insert ethertype filter"
9010                             " into hash table %d!",
9011                             ret);
9012                 return ret;
9013         }
9014         rule->hash_map[ret] = filter;
9015
9016         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9017
9018         return 0;
9019 }
9020
9021 /* Delete an ethertype filter from the SW list */
9022 int
9023 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9024                              struct i40e_ethertype_filter_input *input)
9025 {
9026         struct i40e_ethertype_rule *rule = &pf->ethertype;
9027         struct i40e_ethertype_filter *filter;
9028         int ret;
9029
9030         ret = rte_hash_del_key(rule->hash_table, input);
9031         if (ret < 0) {
9032                 PMD_DRV_LOG(ERR,
9033                             "Failed to delete ethertype filter"
9034                             " from hash table %d!",
9035                             ret);
9036                 return ret;
9037         }
9038         filter = rule->hash_map[ret];
9039         rule->hash_map[ret] = NULL;
9040
9041         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9042         rte_free(filter);
9043
9044         return 0;
9045 }
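
/*
 * Editor's note: the SW rule store above pairs an rte_hash with a parallel
 * pointer array: rte_hash_add_key()/rte_hash_del_key() return a stable slot
 * index for the key, hash_map[slot] maps the slot back to the filter object,
 * and the TAILQ preserves insertion order for iteration. A generic sketch of
 * the insert half of the pattern (names are hypothetical):
 */
static __rte_unused int
i40e_example_slot_insert(struct rte_hash *h, void **slot_map,
                         const void *key, void *obj)
{
        int slot = rte_hash_add_key(h, key);

        if (slot < 0)
                return slot; /* e.g. -ENOSPC when the table is full */
        slot_map[slot] = obj;

        return 0;
}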
9046
9047 /*
9048  * Configure an ethertype filter, which can direct packets by filtering
9049  * on MAC address and ether_type, or on ether_type only.
9050  */
9051 int
9052 i40e_ethertype_filter_set(struct i40e_pf *pf,
9053                         struct rte_eth_ethertype_filter *filter,
9054                         bool add)
9055 {
9056         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9057         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9058         struct i40e_ethertype_filter *ethertype_filter, *node;
9059         struct i40e_ethertype_filter check_filter;
9060         struct i40e_control_filter_stats stats;
9061         uint16_t flags = 0;
9062         int ret;
9063
9064         if (filter->queue >= pf->dev_data->nb_rx_queues) {
9065                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9066                 return -EINVAL;
9067         }
9068         if (filter->ether_type == ETHER_TYPE_IPv4 ||
9069                 filter->ether_type == ETHER_TYPE_IPv6) {
9070                 PMD_DRV_LOG(ERR,
9071                         "unsupported ether_type(0x%04x) in control packet filter.",
9072                         filter->ether_type);
9073                 return -EINVAL;
9074         }
9075         if (filter->ether_type == ETHER_TYPE_VLAN)
9076                 PMD_DRV_LOG(WARNING,
9077                         "filter vlan ether_type in first tag is not supported.");
9078
9079         /* Check if there is the filter in SW list */
9080         memset(&check_filter, 0, sizeof(check_filter));
9081         i40e_ethertype_filter_convert(filter, &check_filter);
9082         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9083                                                &check_filter.input);
9084         if (add && node) {
9085                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9086                 return -EINVAL;
9087         }
9088
9089         if (!add && !node) {
9090                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9091                 return -EINVAL;
9092         }
9093
9094         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9095                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9096         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9097                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9098         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9099
9100         memset(&stats, 0, sizeof(stats));
9101         ret = i40e_aq_add_rem_control_packet_filter(hw,
9102                         filter->mac_addr.addr_bytes,
9103                         filter->ether_type, flags,
9104                         pf->main_vsi->seid,
9105                         filter->queue, add, &stats, NULL);
9106
9107         PMD_DRV_LOG(INFO,
9108                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9109                 ret, stats.mac_etype_used, stats.etype_used,
9110                 stats.mac_etype_free, stats.etype_free);
9111         if (ret < 0)
9112                 return -ENOSYS;
9113
9114         /* Add or delete a filter in SW list */
9115         if (add) {
9116                 ethertype_filter = rte_zmalloc("ethertype_filter",
9117                                        sizeof(*ethertype_filter), 0);
                     if (ethertype_filter == NULL)
                             return -ENOMEM;
9118                 rte_memcpy(ethertype_filter, &check_filter,
9119                            sizeof(check_filter));
9120                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9121         } else {
9122                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9123         }
9124
9125         return ret;
9126 }
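
/*
 * Illustrative usage sketch (guarded out, not part of the driver): how an
 * application would reach i40e_ethertype_filter_set() through the legacy
 * filter API. The port_id, EtherType and queue values below are hypothetical
 * examples only.
 */
#if 0
static int
example_add_ethertype_filter(uint16_t port_id)
{
        struct rte_eth_ethertype_filter filter;

        memset(&filter, 0, sizeof(filter));
        filter.ether_type = 0x88F7;     /* e.g. PTP over Ethernet */
        filter.flags = 0;               /* match on ether_type only */
        filter.queue = 1;               /* steer matching packets to RX queue 1 */

        /* Dispatched to i40e_ethertype_filter_handle() below. */
        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
                                       RTE_ETH_FILTER_ADD, &filter);
}
#endif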
9127
9128 /*
9129  * Handle operations for ethertype filter.
9130  */
9131 static int
9132 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
9133                                 enum rte_filter_op filter_op,
9134                                 void *arg)
9135 {
9136         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9137         int ret = 0;
9138
9139         if (filter_op == RTE_ETH_FILTER_NOP)
9140                 return ret;
9141
9142         if (arg == NULL) {
9143                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
9144                             filter_op);
9145                 return -EINVAL;
9146         }
9147
9148         switch (filter_op) {
9149         case RTE_ETH_FILTER_ADD:
9150                 ret = i40e_ethertype_filter_set(pf,
9151                         (struct rte_eth_ethertype_filter *)arg,
9152                         TRUE);
9153                 break;
9154         case RTE_ETH_FILTER_DELETE:
9155                 ret = i40e_ethertype_filter_set(pf,
9156                         (struct rte_eth_ethertype_filter *)arg,
9157                         FALSE);
9158                 break;
9159         default:
9160                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
9161                 ret = -ENOSYS;
9162                 break;
9163         }
9164         return ret;
9165 }
9166
9167 static int
9168 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
9169                      enum rte_filter_type filter_type,
9170                      enum rte_filter_op filter_op,
9171                      void *arg)
9172 {
9173         int ret = 0;
9174
9175         if (dev == NULL)
9176                 return -EINVAL;
9177
9178         switch (filter_type) {
9179         case RTE_ETH_FILTER_NONE:
9180                 /* For global configuration */
9181                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
9182                 break;
9183         case RTE_ETH_FILTER_HASH:
9184                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
9185                 break;
9186         case RTE_ETH_FILTER_MACVLAN:
9187                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
9188                 break;
9189         case RTE_ETH_FILTER_ETHERTYPE:
9190                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
9191                 break;
9192         case RTE_ETH_FILTER_TUNNEL:
9193                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
9194                 break;
9195         case RTE_ETH_FILTER_FDIR:
9196                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
9197                 break;
9198         case RTE_ETH_FILTER_GENERIC:
9199                 if (filter_op != RTE_ETH_FILTER_GET)
9200                         return -EINVAL;
9201                 *(const void **)arg = &i40e_flow_ops;
9202                 break;
9203         default:
9204                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9205                                                         filter_type);
9206                 ret = -EINVAL;
9207                 break;
9208         }
9209
9210         return ret;
9211 }
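
/*
 * Illustrative sketch (guarded out): the generic flow (rte_flow) layer
 * obtains this driver's flow ops through the RTE_ETH_FILTER_GENERIC /
 * RTE_ETH_FILTER_GET pair handled above, roughly as follows. The port_id
 * value is a hypothetical example.
 */
#if 0
uint16_t port_id = 0;           /* hypothetical port */
const struct rte_flow_ops *ops = NULL;

if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
                            RTE_ETH_FILTER_GET, &ops) == 0 && ops != NULL) {
        /* ops->validate/ops->create/ops->destroy may now be invoked. */
}
#endif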
9212
9213 /*
9214  * Check and enable Extended Tag.
9215  * Enabling Extended Tag is important for 40G performance.
9216  */
9217 static void
9218 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9219 {
9220         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9221         uint32_t buf = 0;
9222         int ret;
9223
9224         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9225                                       PCI_DEV_CAP_REG);
9226         if (ret < 0) {
9227                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9228                             PCI_DEV_CAP_REG);
9229                 return;
9230         }
9231         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9232                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9233                 return;
9234         }
9235
9236         buf = 0;
9237         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9238                                       PCI_DEV_CTRL_REG);
9239         if (ret < 0) {
9240                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9241                             PCI_DEV_CTRL_REG);
9242                 return;
9243         }
9244         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9245                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9246                 return;
9247         }
9248         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9249         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9250                                        PCI_DEV_CTRL_REG);
9251         if (ret < 0) {
9252                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9253                             PCI_DEV_CTRL_REG);
9254                 return;
9255         }
9256 }
9257
9258 /*
9259  * As some registers are not reset except by a global hardware reset,
9260  * hardware initialization is needed to put those registers into an
9261  * expected initial state.
9262  */
9263 static void
9264 i40e_hw_init(struct rte_eth_dev *dev)
9265 {
9266         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9267
9268         i40e_enable_extended_tag(dev);
9269
9270         /* clear the PF Queue Filter control register */
9271         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9272
9273         /* Disable symmetric hash per port */
9274         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9275 }
9276
9277 /*
9278  * On X722 it is possible to have multiple pctypes mapped to the same flowtype;
9279  * however, this function returns only the highest pctype index,
9280  * which is not quite correct. This is a known problem of the i40e driver
9281  * and needs to be fixed later.
9282  */
9283 enum i40e_filter_pctype
9284 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9285 {
9286         int i;
9287         uint64_t pctype_mask;
9288
9289         if (flow_type < I40E_FLOW_TYPE_MAX) {
9290                 pctype_mask = adapter->pctypes_tbl[flow_type];
9291                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9292                         if (pctype_mask & (1ULL << i))
9293                                 return (enum i40e_filter_pctype)i;
9294                 }
9295         }
9296         return I40E_FILTER_PCTYPE_INVALID;
9297 }
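
/*
 * For illustration only (guarded out): because pctype_mask may have several
 * bits set on X722, the loop above effectively selects the most significant
 * set bit. Assuming GCC builtins and reusing the function's locals, the
 * equivalent is shown below (unlike the loop, this would also return bit 0
 * if it were the only bit set; the loop's i > 0 bound excludes it).
 */
#if 0
uint64_t mask = adapter->pctypes_tbl[flow_type];

if (mask != 0)
        return (enum i40e_filter_pctype)(63 - __builtin_clzll(mask));
#endif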
9298
9299 uint16_t
9300 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9301                         enum i40e_filter_pctype pctype)
9302 {
9303         uint16_t flowtype;
9304         uint64_t pctype_mask = 1ULL << pctype;
9305
9306         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9307              flowtype++) {
9308                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9309                         return flowtype;
9310         }
9311
9312         return RTE_ETH_FLOW_UNKNOWN;
9313 }
9314
9315 /*
9316  * On X710, performance is far below expectations on recent firmware
9317  * versions. On XL710, performance is likewise far below expectations on
9318  * recent firmware versions if promiscuous mode is disabled, or if promiscuous
9319  * mode is enabled and the port MAC address equals the packet destination MAC
9320  * address. The fix for this issue may not be integrated in the next
9321  * firmware version, so a workaround in the software driver is needed. It
9322  * modifies the initial values of 3 internal-only registers for both X710 and
9323  * XL710. Note that the values for X710 and XL710 may differ, and the
9324  * workaround can be removed once it is fixed in firmware.
9325  */
9326
9327 /* For both X710 and XL710 */
9328 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
9329 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
9330 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
9331
9332 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9333 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
9334
9335 /* For X722 */
9336 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
9337 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
9338
9339 /* For X710 */
9340 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
9341 /* For XL710 */
9342 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
9343 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
9344
9345 static int
9346 i40e_dev_sync_phy_type(struct i40e_hw *hw)
9347 {
9348         enum i40e_status_code status;
9349         struct i40e_aq_get_phy_abilities_resp phy_ab;
9350         int ret = -ENOTSUP;
9351         int retries = 0;
9352
9353         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
9354                                               NULL);
9355
9356         while (status) {
9357                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
9358                         status);
9359                 retries++;
9360                 rte_delay_us(100000);
9361                 if (retries < 5)
9362                         status = i40e_aq_get_phy_capabilities(hw, false,
9363                                         true, &phy_ab, NULL);
9364                 else
9365                         return ret;
9366         }
9367         return 0;
9368 }
9369
9370 static void
9371 i40e_configure_registers(struct i40e_hw *hw)
9372 {
9373         static struct {
9374                 uint32_t addr;
9375                 uint64_t val;
9376         } reg_table[] = {
9377                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
9378                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
9379                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
9380         };
9381         uint64_t reg;
9382         uint32_t i;
9383         int ret;
9384
9385         for (i = 0; i < RTE_DIM(reg_table); i++) {
9386                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
9387                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9388                                 reg_table[i].val =
9389                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
9390                         else /* For X710/XL710/XXV710 */
9391                                 if (hw->aq.fw_maj_ver < 6)
9392                                         reg_table[i].val =
9393                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
9394                                 else
9395                                         reg_table[i].val =
9396                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
9397                 }
9398
9399                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
9400                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9401                                 reg_table[i].val =
9402                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9403                         else /* For X710/XL710/XXV710 */
9404                                 reg_table[i].val =
9405                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9406                 }
9407
9408                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
9409                         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
9410                             I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
9411                                 reg_table[i].val =
9412                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
9413                         else /* For X710 */
9414                                 reg_table[i].val =
9415                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
9416                 }
9417
9418                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
9419                                                         &reg, NULL);
9420                 if (ret < 0) {
9421                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
9422                                                         reg_table[i].addr);
9423                         break;
9424                 }
9425                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
9426                                                 reg_table[i].addr, reg);
9427                 if (reg == reg_table[i].val)
9428                         continue;
9429
9430                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
9431                                                 reg_table[i].val, NULL);
9432                 if (ret < 0) {
9433                         PMD_DRV_LOG(ERR,
9434                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
9435                                 reg_table[i].val, reg_table[i].addr);
9436                         break;
9437                 }
9438                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
9439                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
9440         }
9441 }
9442
9443 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
9444 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
9445 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
9446 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
9447 static int
9448 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
9449 {
9450         uint32_t reg;
9451         int ret;
9452
9453         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
9454                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
9455                 return -EINVAL;
9456         }
9457
9458         /* Configure for double VLAN RX stripping */
9459         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
9460         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
9461                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
9462                 ret = i40e_aq_debug_write_register(hw,
9463                                                    I40E_VSI_TSR(vsi->vsi_id),
9464                                                    reg, NULL);
9465                 if (ret < 0) {
9466                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
9467                                     vsi->vsi_id);
9468                         return I40E_ERR_CONFIG;
9469                 }
9470         }
9471
9472         /* Configure for double VLAN TX insertion */
9473         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
9474         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
9475                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
9476                 ret = i40e_aq_debug_write_register(hw,
9477                                                    I40E_VSI_L2TAGSTXVALID(
9478                                                    vsi->vsi_id), reg, NULL);
9479                 if (ret < 0) {
9480                         PMD_DRV_LOG(ERR,
9481                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
9482                                 vsi->vsi_id);
9483                         return I40E_ERR_CONFIG;
9484                 }
9485         }
9486
9487         return 0;
9488 }
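
/*
 * Illustrative sketch (guarded out): one application-side control related to
 * the double VLAN handling configured here is the extended (QinQ) VLAN
 * offload toggle; port_id is a hypothetical example.
 */
#if 0
uint16_t port_id = 0;           /* hypothetical port */
int mask = rte_eth_dev_get_vlan_offload(port_id);

mask |= ETH_VLAN_EXTEND_OFFLOAD;
rte_eth_dev_set_vlan_offload(port_id, mask);
#endif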
9489
9490 /**
9491  * i40e_aq_add_mirror_rule
9492  * @hw: pointer to the hardware structure
9493  * @seid: VEB seid to add mirror rule to
9494  * @dst_id: destination VSI seid
      * @rule_type: type of the mirror rule to be added
9495  * @entries: buffer which contains the entities to be mirrored
9496  * @count: number of entities contained in the buffer
9497  * @rule_id: the rule_id of the rule to be added
9498  *
9499  * Add a mirror rule for a given VEB.
9500  *
9501  **/
9502 static enum i40e_status_code
9503 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
9504                         uint16_t seid, uint16_t dst_id,
9505                         uint16_t rule_type, uint16_t *entries,
9506                         uint16_t count, uint16_t *rule_id)
9507 {
9508         struct i40e_aq_desc desc;
9509         struct i40e_aqc_add_delete_mirror_rule cmd;
9510         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
9511                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
9512                 &desc.params.raw;
9513         uint16_t buff_len;
9514         enum i40e_status_code status;
9515
9516         i40e_fill_default_direct_cmd_desc(&desc,
9517                                           i40e_aqc_opc_add_mirror_rule);
9518         memset(&cmd, 0, sizeof(cmd));
9519
9520         buff_len = sizeof(uint16_t) * count;
9521         desc.datalen = rte_cpu_to_le_16(buff_len);
9522         if (buff_len > 0)
9523                 desc.flags |= rte_cpu_to_le_16(
9524                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
9525         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
9526                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
9527         cmd.num_entries = rte_cpu_to_le_16(count);
9528         cmd.seid = rte_cpu_to_le_16(seid);
9529         cmd.destination = rte_cpu_to_le_16(dst_id);
9530
9531         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
9532         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
9533         PMD_DRV_LOG(INFO,
9534                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u, mirror_rules_used = %u, mirror_rules_free = %u",
9535                 hw->aq.asq_last_status, resp->rule_id,
9536                 resp->mirror_rules_used, resp->mirror_rules_free);
9537         *rule_id = rte_le_to_cpu_16(resp->rule_id);
9538
9539         return status;
9540 }
9541
9542 /**
9543  * i40e_aq_del_mirror_rule
9544  * @hw: pointer to the hardware structure
9545  * @seid: VEB seid to delete the mirror rule from
      * @rule_type: type of the mirror rule to be deleted
9546  * @entries: buffer which contains the entities to be mirrored
9547  * @count: number of entities contained in the buffer
9548  * @rule_id: the rule_id of the rule to be deleted
9549  *
9550  * Delete a mirror rule for a given VEB.
9551  *
9552  **/
9553 static enum i40e_status_code
9554 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
9555                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
9556                 uint16_t count, uint16_t rule_id)
9557 {
9558         struct i40e_aq_desc desc;
9559         struct i40e_aqc_add_delete_mirror_rule cmd;
9560         uint16_t buff_len = 0;
9561         enum i40e_status_code status;
9562         void *buff = NULL;
9563
9564         i40e_fill_default_direct_cmd_desc(&desc,
9565                                           i40e_aqc_opc_delete_mirror_rule);
9566         memset(&cmd, 0, sizeof(cmd));
9567         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
9568                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
9569                                                           I40E_AQ_FLAG_RD));
9570                 cmd.num_entries = rte_cpu_to_le_16(count);
9571                 buff_len = sizeof(uint16_t) * count;
9572                 desc.datalen = rte_cpu_to_le_16(buff_len);
9573                 buff = (void *)entries;
9574         } else
9575                 /* rule ID is passed in the destination field when deleting a mirror rule */
9576                 cmd.destination = rte_cpu_to_le_16(rule_id);
9577
9578         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
9579                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
9580         cmd.seid = rte_cpu_to_le_16(seid);
9581
9582         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
9583         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
9584
9585         return status;
9586 }
9587
9588 /**
9589  * i40e_mirror_rule_set
9590  * @dev: pointer to the ethernet device structure
9591  * @mirror_conf: mirror rule info
9592  * @sw_id: mirror rule's sw_id
9593  * @on: enable/disable
9594  *
9595  * set a mirror rule.
9596  *
9597  **/
9598 static int
9599 i40e_mirror_rule_set(struct rte_eth_dev *dev,
9600                         struct rte_eth_mirror_conf *mirror_conf,
9601                         uint8_t sw_id, uint8_t on)
9602 {
9603         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9604         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9605         struct i40e_mirror_rule *it, *mirr_rule = NULL;
9606         struct i40e_mirror_rule *parent = NULL;
9607         uint16_t seid, dst_seid, rule_id;
9608         uint16_t i, j = 0;
9609         int ret;
9610
9611         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
9612
9613         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
9614                 PMD_DRV_LOG(ERR,
9615                         "mirror rule cannot be configured without a veb or vfs.");
9616                 return -ENOSYS;
9617         }
9618         if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
9619                 PMD_DRV_LOG(ERR, "mirror table is full.");
9620                 return -ENOSPC;
9621         }
9622         if (mirror_conf->dst_pool > pf->vf_num) {
9623                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
9624                                  mirror_conf->dst_pool);
9625                 return -EINVAL;
9626         }
9627
9628         seid = pf->main_vsi->veb->seid;
9629
9630         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
9631                 if (sw_id <= it->index) {
9632                         mirr_rule = it;
9633                         break;
9634                 }
9635                 parent = it;
9636         }
9637         if (mirr_rule && sw_id == mirr_rule->index) {
9638                 if (on) {
9639                         PMD_DRV_LOG(ERR, "mirror rule exists.");
9640                         return -EEXIST;
9641                 } else {
9642                         ret = i40e_aq_del_mirror_rule(hw, seid,
9643                                         mirr_rule->rule_type,
9644                                         mirr_rule->entries,
9645                                         mirr_rule->num_entries, mirr_rule->id);
9646                         if (ret < 0) {
9647                                 PMD_DRV_LOG(ERR,
9648                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
9649                                         ret, hw->aq.asq_last_status);
9650                                 return -ENOSYS;
9651                         }
9652                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
9653                         rte_free(mirr_rule);
9654                         pf->nb_mirror_rule--;
9655                         return 0;
9656                 }
9657         } else if (!on) {
9658                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
9659                 return -ENOENT;
9660         }
9661
9662         mirr_rule = rte_zmalloc("i40e_mirror_rule",
9663                                 sizeof(struct i40e_mirror_rule), 0);
9664         if (!mirr_rule) {
9665                 PMD_DRV_LOG(ERR, "failed to allocate memory");
9666                 return I40E_ERR_NO_MEMORY;
9667         }
9668         switch (mirror_conf->rule_type) {
9669         case ETH_MIRROR_VLAN:
9670                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
9671                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
9672                                 mirr_rule->entries[j] =
9673                                         mirror_conf->vlan.vlan_id[i];
9674                                 j++;
9675                         }
9676                 }
9677                 if (j == 0) {
9678                         PMD_DRV_LOG(ERR, "vlan is not specified.");
9679                         rte_free(mirr_rule);
9680                         return -EINVAL;
9681                 }
9682                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
9683                 break;
9684         case ETH_MIRROR_VIRTUAL_POOL_UP:
9685         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
9686                 /* check if the specified pool bit is out of range */
9687                 if (mirror_conf->pool_mask >= (uint64_t)(1ULL << (pf->vf_num + 1))) {
9688                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
9689                         rte_free(mirr_rule);
9690                         return -EINVAL;
9691                 }
9692                 for (i = 0, j = 0; i < pf->vf_num; i++) {
9693                         if (mirror_conf->pool_mask & (1ULL << i)) {
9694                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
9695                                 j++;
9696                         }
9697                 }
9698                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
9699                         /* add pf vsi to entries */
9700                         mirr_rule->entries[j] = pf->main_vsi_seid;
9701                         j++;
9702                 }
9703                 if (j == 0) {
9704                         PMD_DRV_LOG(ERR, "pool is not specified.");
9705                         rte_free(mirr_rule);
9706                         return -EINVAL;
9707                 }
9708                 /* in AQ commands, egress and ingress are relative to the switch, not the port */
9709                 mirr_rule->rule_type =
9710                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
9711                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
9712                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
9713                 break;
9714         case ETH_MIRROR_UPLINK_PORT:
9715                 /* in AQ commands, egress and ingress are relative to the switch, not the port */
9716                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
9717                 break;
9718         case ETH_MIRROR_DOWNLINK_PORT:
9719                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
9720                 break;
9721         default:
9722                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
9723                         mirror_conf->rule_type);
9724                 rte_free(mirr_rule);
9725                 return -EINVAL;
9726         }
9727
9728         /* If the dst_pool is equal to vf_num, consider it as PF */
9729         if (mirror_conf->dst_pool == pf->vf_num)
9730                 dst_seid = pf->main_vsi_seid;
9731         else
9732                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
9733
9734         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
9735                                       mirr_rule->rule_type, mirr_rule->entries,
9736                                       j, &rule_id);
9737         if (ret < 0) {
9738                 PMD_DRV_LOG(ERR,
9739                         "failed to add mirror rule: ret = %d, aq_err = %d.",
9740                         ret, hw->aq.asq_last_status);
9741                 rte_free(mirr_rule);
9742                 return -ENOSYS;
9743         }
9744
9745         mirr_rule->index = sw_id;
9746         mirr_rule->num_entries = j;
9747         mirr_rule->id = rule_id;
9748         mirr_rule->dst_vsi_seid = dst_seid;
9749
9750         if (parent)
9751                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
9752         else
9753                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
9754
9755         pf->nb_mirror_rule++;
9756         return 0;
9757 }
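
/*
 * Illustrative usage sketch (guarded out): adding a VLAN mirror rule from an
 * application. The port_id, VLAN ID and destination pool values are
 * hypothetical examples.
 */
#if 0
static int
example_add_vlan_mirror(uint16_t port_id)
{
        struct rte_eth_mirror_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.rule_type = ETH_MIRROR_VLAN;
        conf.vlan.vlan_mask = 1ULL << 0;        /* vlan_id[0] is valid */
        conf.vlan.vlan_id[0] = 100;             /* mirror VLAN 100 */
        conf.dst_pool = 0;                      /* mirror into pool/VF 0 */

        /* rule_id 0, on = 1: ends up in i40e_mirror_rule_set() above. */
        return rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
}
#endif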
9758
9759 /**
9760  * i40e_mirror_rule_reset
9761  * @dev: pointer to the device
9762  * @sw_id: mirror rule's sw_id
9763  *
9764  * reset a mirror rule.
9765  *
9766  **/
9767 static int
9768 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
9769 {
9770         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9771         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9772         struct i40e_mirror_rule *it, *mirr_rule = NULL;
9773         uint16_t seid;
9774         int ret;
9775
9776         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
9777
9778         seid = pf->main_vsi->veb->seid;
9779
9780         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
9781                 if (sw_id == it->index) {
9782                         mirr_rule = it;
9783                         break;
9784                 }
9785         }
9786         if (mirr_rule) {
9787                 ret = i40e_aq_del_mirror_rule(hw, seid,
9788                                 mirr_rule->rule_type,
9789                                 mirr_rule->entries,
9790                                 mirr_rule->num_entries, mirr_rule->id);
9791                 if (ret < 0) {
9792                         PMD_DRV_LOG(ERR,
9793                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
9794                                 ret, hw->aq.asq_last_status);
9795                         return -ENOSYS;
9796                 }
9797                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
9798                 rte_free(mirr_rule);
9799                 pf->nb_mirror_rule--;
9800         } else {
9801                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
9802                 return -ENOENT;
9803         }
9804         return 0;
9805 }
9806
9807 static uint64_t
9808 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
9809 {
9810         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9811         uint64_t systim_cycles;
9812
9813         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
9814         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
9815                         << 32;
9816
9817         return systim_cycles;
9818 }
9819
9820 static uint64_t
9821 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
9822 {
9823         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9824         uint64_t rx_tstamp;
9825
9826         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
9827         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
9828                         << 32;
9829
9830         return rx_tstamp;
9831 }
9832
9833 static uint64_t
9834 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
9835 {
9836         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9837         uint64_t tx_tstamp;
9838
9839         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
9840         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
9841                         << 32;
9842
9843         return tx_tstamp;
9844 }
9845
9846 static void
9847 i40e_start_timecounters(struct rte_eth_dev *dev)
9848 {
9849         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9850         struct i40e_adapter *adapter =
9851                         (struct i40e_adapter *)dev->data->dev_private;
9852         struct rte_eth_link link;
9853         uint32_t tsync_inc_l;
9854         uint32_t tsync_inc_h;
9855
9856         /* Get current link speed. */
9857         memset(&link, 0, sizeof(link));
9858         i40e_dev_link_update(dev, 1);
9859         rte_i40e_dev_atomic_read_link_status(dev, &link);
9860
9861         switch (link.link_speed) {
9862         case ETH_SPEED_NUM_40G:
9863                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
9864                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
9865                 break;
9866         case ETH_SPEED_NUM_10G:
9867                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
9868                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
9869                 break;
9870         case ETH_SPEED_NUM_1G:
9871                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
9872                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
9873                 break;
9874         default:
9875                 tsync_inc_l = 0x0;
9876                 tsync_inc_h = 0x0;
9877         }
9878
9879         /* Set the timesync increment value. */
9880         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
9881         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
9882
9883         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
9884         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
9885         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
9886
9887         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
9888         adapter->systime_tc.cc_shift = 0;
9889         adapter->systime_tc.nsec_mask = 0;
9890
9891         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
9892         adapter->rx_tstamp_tc.cc_shift = 0;
9893         adapter->rx_tstamp_tc.nsec_mask = 0;
9894
9895         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
9896         adapter->tx_tstamp_tc.cc_shift = 0;
9897         adapter->tx_tstamp_tc.nsec_mask = 0;
9898 }
9899
9900 static int
9901 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
9902 {
9903         struct i40e_adapter *adapter =
9904                         (struct i40e_adapter *)dev->data->dev_private;
9905
9906         adapter->systime_tc.nsec += delta;
9907         adapter->rx_tstamp_tc.nsec += delta;
9908         adapter->tx_tstamp_tc.nsec += delta;
9909
9910         return 0;
9911 }
9912
9913 static int
9914 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
9915 {
9916         uint64_t ns;
9917         struct i40e_adapter *adapter =
9918                         (struct i40e_adapter *)dev->data->dev_private;
9919
9920         ns = rte_timespec_to_ns(ts);
9921
9922         /* Set the timecounters to a new value. */
9923         adapter->systime_tc.nsec = ns;
9924         adapter->rx_tstamp_tc.nsec = ns;
9925         adapter->tx_tstamp_tc.nsec = ns;
9926
9927         return 0;
9928 }
9929
9930 static int
9931 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
9932 {
9933         uint64_t ns, systime_cycles;
9934         struct i40e_adapter *adapter =
9935                         (struct i40e_adapter *)dev->data->dev_private;
9936
9937         systime_cycles = i40e_read_systime_cyclecounter(dev);
9938         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
9939         *ts = rte_ns_to_timespec(ns);
9940
9941         return 0;
9942 }
9943
9944 static int
9945 i40e_timesync_enable(struct rte_eth_dev *dev)
9946 {
9947         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9948         uint32_t tsync_ctl_l;
9949         uint32_t tsync_ctl_h;
9950
9951         /* Stop the timesync system time. */
9952         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
9953         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
9954         /* Reset the timesync system time value. */
9955         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
9956         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
9957
9958         i40e_start_timecounters(dev);
9959
9960         /* Clear timesync registers. */
9961         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
9962         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
9963         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
9964         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
9965         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
9966         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
9967
9968         /* Enable timestamping of PTP packets. */
9969         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
9970         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
9971
9972         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
9973         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
9974         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
9975
9976         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
9977         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
9978
9979         return 0;
9980 }
9981
9982 static int
9983 i40e_timesync_disable(struct rte_eth_dev *dev)
9984 {
9985         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9986         uint32_t tsync_ctl_l;
9987         uint32_t tsync_ctl_h;
9988
9989         /* Disable timestamping of transmitted PTP packets. */
9990         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
9991         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
9992
9993         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
9994         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
9995
9996         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
9997         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
9998
9999         /* Reset the timesync increment value. */
10000         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10001         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10002
10003         return 0;
10004 }
10005
10006 static int
10007 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10008                                 struct timespec *timestamp, uint32_t flags)
10009 {
10010         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10011         struct i40e_adapter *adapter =
10012                 (struct i40e_adapter *)dev->data->dev_private;
10013
10014         uint32_t sync_status;
10015         uint32_t index = flags & 0x03;
10016         uint64_t rx_tstamp_cycles;
10017         uint64_t ns;
10018
10019         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10020         if ((sync_status & (1 << index)) == 0)
10021                 return -EINVAL;
10022
10023         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10024         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10025         *timestamp = rte_ns_to_timespec(ns);
10026
10027         return 0;
10028 }
10029
10030 static int
10031 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10032                                 struct timespec *timestamp)
10033 {
10034         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10035         struct i40e_adapter *adapter =
10036                 (struct i40e_adapter *)dev->data->dev_private;
10037
10038         uint32_t sync_status;
10039         uint64_t tx_tstamp_cycles;
10040         uint64_t ns;
10041
10042         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10043         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10044                 return -EINVAL;
10045
10046         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10047         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10048         *timestamp = rte_ns_to_timespec(ns);
10049
10050         return 0;
10051 }
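
/*
 * Illustrative usage sketch (guarded out): a minimal IEEE 1588 flow over the
 * timesync ops implemented above. The port_id and the RX timestamp register
 * index idx are hypothetical examples.
 */
#if 0
uint16_t port_id = 0;           /* hypothetical port */
uint32_t idx = 0;               /* RX timestamp register index, hypothetical */
struct timespec ts;

rte_eth_timesync_enable(port_id);
/* ... after a PTP event packet is received (index taken from the RX descriptor) ... */
if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, idx) == 0)
        printf("RX timestamp: %ld.%09ld\n", (long)ts.tv_sec, (long)ts.tv_nsec);
/* ... after a timestamped PTP packet has been transmitted ... */
if (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == 0)
        printf("TX timestamp: %ld.%09ld\n", (long)ts.tv_sec, (long)ts.tv_nsec);
#endif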
10052
10053 /*
10054  * i40e_parse_dcb_configure - parse the DCB configuration from the user
10055  * @dev: the device being configured
10056  * @dcb_cfg: pointer to the parsed configuration
10057  * @tc_map: bit map of enabled traffic classes
10058  *
10059  * Returns 0 on success, negative value on failure
10060  */
10061 static int
10062 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10063                          struct i40e_dcbx_config *dcb_cfg,
10064                          uint8_t *tc_map)
10065 {
10066         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10067         uint8_t i, tc_bw, bw_lf;
10068
10069         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10070
10071         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10072         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10073                 PMD_INIT_LOG(ERR, "number of TCs exceeds the maximum.");
10074                 return -EINVAL;
10075         }
10076
10077         /* assume each tc has the same bw */
10078         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10079         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10080                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10081         /* to ensure the sum of tcbw is equal to 100 */
10082         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10083         for (i = 0; i < bw_lf; i++)
10084                 dcb_cfg->etscfg.tcbwtable[i]++;
10085
10086         /* assume each tc has the same Transmission Selection Algorithm */
10087         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10088                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10089
10090         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10091                 dcb_cfg->etscfg.prioritytable[i] =
10092                                 dcb_rx_conf->dcb_tc[i];
10093
10094         /* FW needs one App to configure HW */
10095         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10096         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10097         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10098         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10099
10100         if (dcb_rx_conf->nb_tcs == 0)
10101                 *tc_map = 1; /* tc0 only */
10102         else
10103                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10104
10105         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10106                 dcb_cfg->pfc.willing = 0;
10107                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10108                 dcb_cfg->pfc.pfcenable = *tc_map;
10109         }
10110         return 0;
10111 }
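
/*
 * Worked example of the bandwidth split above (illustration only): with
 * nb_tcs = 8, tc_bw = 100 / 8 = 12 and bw_lf = 100 % 8 = 4, so TC0..TC3
 * get 13% and TC4..TC7 get 12%, summing to exactly 100%.
 */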
10112
10113
10114 static enum i40e_status_code
10115 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10116                               struct i40e_aqc_vsi_properties_data *info,
10117                               uint8_t enabled_tcmap)
10118 {
10119         enum i40e_status_code ret;
10120         int i, total_tc = 0;
10121         uint16_t qpnum_per_tc, bsf, qp_idx;
10122         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10123         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10124         uint16_t used_queues;
10125
10126         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10127         if (ret != I40E_SUCCESS)
10128                 return ret;
10129
10130         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10131                 if (enabled_tcmap & (1 << i))
10132                         total_tc++;
10133         }
10134         if (total_tc == 0)
10135                 total_tc = 1;
10136         vsi->enabled_tc = enabled_tcmap;
10137
10138         /* different VSI types have different numbers of queues assigned */
10139         if (vsi->type == I40E_VSI_MAIN)
10140                 used_queues = dev_data->nb_rx_queues -
10141                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10142         else if (vsi->type == I40E_VSI_VMDQ2)
10143                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10144         else {
10145                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10146                 return I40E_ERR_NO_AVAILABLE_VSI;
10147         }
10148
10149         qpnum_per_tc = used_queues / total_tc;
10150         /* Number of queues per enabled TC */
10151         if (qpnum_per_tc == 0) {
10152                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
10153                 return I40E_ERR_INVALID_QP_ID;
10154         }
10155         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10156                                 I40E_MAX_Q_PER_TC);
10157         bsf = rte_bsf32(qpnum_per_tc);
10158
10159         /**
10160          * Configure TC and queue mapping parameters: for an enabled TC,
10161          * allocate qpnum_per_tc queues to its traffic; a disabled TC is
10162          * served by the default queue.
10163          */
10164         qp_idx = 0;
10165         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10166                 if (vsi->enabled_tc & (1 << i)) {
10167                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10168                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10169                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10170                         qp_idx += qpnum_per_tc;
10171                 } else
10172                         info->tc_mapping[i] = 0;
10173         }
10174
10175         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
10176         if (vsi->type == I40E_VSI_SRIOV) {
10177                 info->mapping_flags |=
10178                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10179                 for (i = 0; i < vsi->nb_qps; i++)
10180                         info->queue_mapping[i] =
10181                                 rte_cpu_to_le_16(vsi->base_queue + i);
10182         } else {
10183                 info->mapping_flags |=
10184                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10185                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10186         }
10187         info->valid_sections |=
10188                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10189
10190         return I40E_SUCCESS;
10191 }
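
/*
 * Worked example of the mapping above (illustration only): with
 * used_queues = 16 and 4 enabled TCs, qpnum_per_tc = 4 and bsf = 2, so
 * TC0..TC3 are mapped to queue offsets 0, 4, 8 and 12, each advertising
 * 2^2 = 4 queue pairs in tc_mapping[].
 */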
10192
10193 /*
10194  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10195  * @veb: VEB to be configured
10196  * @tc_map: enabled TC bitmap
10197  *
10198  * Returns 0 on success, negative value on failure
10199  */
10200 static enum i40e_status_code
10201 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10202 {
10203         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10204         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10205         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10206         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10207         enum i40e_status_code ret = I40E_SUCCESS;
10208         int i;
10209         uint32_t bw_max;
10210
10211         /* Nothing to do if the requested TC map matches the one already enabled */
10212         if (veb->enabled_tc == tc_map)
10213                 return ret;
10214
10215         /* configure tc bandwidth */
10216         memset(&veb_bw, 0, sizeof(veb_bw));
10217         veb_bw.tc_valid_bits = tc_map;
10218         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10219         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10220                 if (tc_map & BIT_ULL(i))
10221                         veb_bw.tc_bw_share_credits[i] = 1;
10222         }
10223         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10224                                                    &veb_bw, NULL);
10225         if (ret) {
10226                 PMD_INIT_LOG(ERR,
10227                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10228                         hw->aq.asq_last_status);
10229                 return ret;
10230         }
10231
10232         memset(&ets_query, 0, sizeof(ets_query));
10233         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10234                                                    &ets_query, NULL);
10235         if (ret != I40E_SUCCESS) {
10236                 PMD_DRV_LOG(ERR,
10237                         "Failed to get switch_comp ETS configuration %u",
10238                         hw->aq.asq_last_status);
10239                 return ret;
10240         }
10241         memset(&bw_query, 0, sizeof(bw_query));
10242         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10243                                                   &bw_query, NULL);
10244         if (ret != I40E_SUCCESS) {
10245                 PMD_DRV_LOG(ERR,
10246                         "Failed to get switch_comp bandwidth configuration %u",
10247                         hw->aq.asq_last_status);
10248                 return ret;
10249         }
10250
10251         /* store and print out BW info */
10252         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10253         veb->bw_info.bw_max = ets_query.tc_bw_max;
10254         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10255         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10256         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10257                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10258                      I40E_16_BIT_WIDTH);
10259         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10260                 veb->bw_info.bw_ets_share_credits[i] =
10261                                 bw_query.tc_bw_share_credits[i];
10262                 veb->bw_info.bw_ets_credits[i] =
10263                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10264                 /* 4 bits per TC, 4th bit is reserved */
10265                 veb->bw_info.bw_ets_max[i] =
10266                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10267                                   RTE_LEN2MASK(3, uint8_t));
10268                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10269                             veb->bw_info.bw_ets_share_credits[i]);
10270                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10271                             veb->bw_info.bw_ets_credits[i]);
10272                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10273                             veb->bw_info.bw_ets_max[i]);
10274         }
10275
10276         veb->enabled_tc = tc_map;
10277
10278         return ret;
10279 }
10280
10281
10282 /*
10283  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10284  * @vsi: VSI to be configured
10285  * @tc_map: enabled TC bitmap
10286  *
10287  * Returns 0 on success, negative value on failure
10288  */
10289 static enum i40e_status_code
10290 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10291 {
10292         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10293         struct i40e_vsi_context ctxt;
10294         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10295         enum i40e_status_code ret = I40E_SUCCESS;
10296         int i;
10297
10298         /* Nothing to do if the requested TC map matches the one already enabled */
10299         if (vsi->enabled_tc == tc_map)
10300                 return ret;
10301
10302         /* configure tc bandwidth */
10303         memset(&bw_data, 0, sizeof(bw_data));
10304         bw_data.tc_valid_bits = tc_map;
10305         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10306         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10307                 if (tc_map & BIT_ULL(i))
10308                         bw_data.tc_bw_credits[i] = 1;
10309         }
10310         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10311         if (ret) {
10312                 PMD_INIT_LOG(ERR,
10313                         "AQ command Config VSI BW allocation per TC failed = %d",
10314                         hw->aq.asq_last_status);
10315                 goto out;
10316         }
10317         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10318                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10319
10320         /* Update Queue Pairs Mapping for currently enabled UPs */
10321         ctxt.seid = vsi->seid;
10322         ctxt.pf_num = hw->pf_id;
10323         ctxt.vf_num = 0;
10324         ctxt.uplink_seid = vsi->uplink_seid;
10325         ctxt.info = vsi->info;
10326         i40e_get_cap(hw);
10327         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10328         if (ret)
10329                 goto out;
10330
10331         /* Update the VSI after updating the VSI queue-mapping information */
10332         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10333         if (ret) {
10334                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10335                         hw->aq.asq_last_status);
10336                 goto out;
10337         }
10338         /* update the local VSI info with updated queue map */
10339         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10340                                         sizeof(vsi->info.tc_mapping));
10341         rte_memcpy(&vsi->info.queue_mapping,
10342                         &ctxt.info.queue_mapping,
10343                 sizeof(vsi->info.queue_mapping));
10344         vsi->info.mapping_flags = ctxt.info.mapping_flags;
10345         vsi->info.valid_sections = 0;
10346
10347         /* query and update current VSI BW information */
10348         ret = i40e_vsi_get_bw_config(vsi);
10349         if (ret) {
10350                 PMD_INIT_LOG(ERR,
10351                          "Failed updating vsi bw info, err %s aq_err %s",
10352                          i40e_stat_str(hw, ret),
10353                          i40e_aq_str(hw, hw->aq.asq_last_status));
10354                 goto out;
10355         }
10356
10357         vsi->enabled_tc = tc_map;
10358
10359 out:
10360         return ret;
10361 }
10362
10363 /*
10364  * i40e_dcb_hw_configure - program the DCB settings into HW
10365  * @pf: PF on which the configuration is applied
10366  * @new_cfg: new configuration
10367  * @tc_map: enabled TC bitmap
10368  *
10369  * Returns 0 on success, negative value on failure
10370  */
10371 static enum i40e_status_code
10372 i40e_dcb_hw_configure(struct i40e_pf *pf,
10373                       struct i40e_dcbx_config *new_cfg,
10374                       uint8_t tc_map)
10375 {
10376         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10377         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
10378         struct i40e_vsi *main_vsi = pf->main_vsi;
10379         struct i40e_vsi_list *vsi_list;
10380         enum i40e_status_code ret;
10381         int i;
10382         uint32_t val;
10383
10384         /* Use the FW API only if FW >= v4.4 */
10385         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
10386               (hw->aq.fw_maj_ver >= 5))) {
10387                 PMD_INIT_LOG(ERR,
10388                         "FW < v4.4, can not use FW LLDP API to configure DCB");
10389                 return I40E_ERR_FIRMWARE_API_VERSION;
10390         }
10391
10392         /* Check whether reconfiguration is needed */
10393         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
10394                 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
10395                 return I40E_SUCCESS;
10396         }
10397
10398         /* Copy the new config to the current config */
10399         *old_cfg = *new_cfg;
10400         old_cfg->etsrec = old_cfg->etscfg;
10401         ret = i40e_set_dcb_config(hw);
10402         if (ret) {
10403                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
10404                          i40e_stat_str(hw, ret),
10405                          i40e_aq_str(hw, hw->aq.asq_last_status));
10406                 return ret;
10407         }
10408         /* set receive Arbiter to RR mode and ETS scheme by default */
10409         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
10410                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
10411                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
10412                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
10413                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
10414                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
10415                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
10416                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
10417                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
10418                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
10419                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
10420                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
10421                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
10422         }
	/* Read back the local MIB to check the configuration took effect */
10424         /* IEEE mode */
10425         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
10426         /* Get Local DCB Config */
10427         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
10428                                      &hw->local_dcbx_config);
10429
	/* If a VEB has been created, update its TC first */
10431         if (main_vsi->veb) {
10432                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
10433                 if (ret)
10434                         PMD_INIT_LOG(WARNING,
10435                                  "Failed configuring TC for VEB seid=%d",
10436                                  main_vsi->veb->seid);
10437         }
10438         /* Update each VSI */
10439         i40e_vsi_config_tc(main_vsi, tc_map);
10440         if (main_vsi->veb) {
10441                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
			/* Besides the main VSI and the VMDQ VSIs, only the
			 * default TC is enabled for the other VSIs.
			 */
10445                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
10446                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10447                                                          tc_map);
10448                         else
10449                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10450                                                          I40E_DEFAULT_TCMAP);
10451                         if (ret)
10452                                 PMD_INIT_LOG(WARNING,
10453                                         "Failed configuring TC for VSI seid=%d",
10454                                         vsi_list->vsi->seid);
10455                         /* continue */
10456                 }
10457         }
10458         return I40E_SUCCESS;
10459 }
10460
10461 /*
 * i40e_dcb_init_configure - initialize DCB configuration
 * @dev: device being configured
 * @sw_dcb: indicate whether DCB is configured by software or offloaded to HW
10465  *
10466  * Returns 0 on success, negative value on failure
10467  */
10468 int
10469 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
10470 {
10471         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10472         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10473         int i, ret = 0;
10474
10475         if ((pf->flags & I40E_FLAG_DCB) == 0) {
10476                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10477                 return -ENOTSUP;
10478         }
10479
10480         /* DCB initialization:
10481          * Update DCB configuration from the Firmware and configure
10482          * LLDP MIB change event.
10483          */
10484         if (sw_dcb == TRUE) {
10485                 ret = i40e_init_dcb(hw);
		/* If the LLDP agent is stopped, i40e_init_dcb() is expected
		 * to fail with an I40E_AQ_RC_EPERM adminq status; otherwise
		 * it should succeed.
		 */
		if (ret == I40E_SUCCESS ||
		    hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
10492                         memset(&hw->local_dcbx_config, 0,
10493                                 sizeof(struct i40e_dcbx_config));
10494                         /* set dcb default configuration */
10495                         hw->local_dcbx_config.etscfg.willing = 0;
10496                         hw->local_dcbx_config.etscfg.maxtcs = 0;
10497                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
10498                         hw->local_dcbx_config.etscfg.tsatable[0] =
10499                                                 I40E_IEEE_TSA_ETS;
10500                         /* all UPs mapping to TC0 */
10501                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10502                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
10503                         hw->local_dcbx_config.etsrec =
10504                                 hw->local_dcbx_config.etscfg;
10505                         hw->local_dcbx_config.pfc.willing = 0;
10506                         hw->local_dcbx_config.pfc.pfccap =
10507                                                 I40E_MAX_TRAFFIC_CLASS;
10508                         /* FW needs one App to configure HW */
10509                         hw->local_dcbx_config.numapps = 1;
10510                         hw->local_dcbx_config.app[0].selector =
10511                                                 I40E_APP_SEL_ETHTYPE;
10512                         hw->local_dcbx_config.app[0].priority = 3;
10513                         hw->local_dcbx_config.app[0].protocolid =
10514                                                 I40E_APP_PROTOID_FCOE;
10515                         ret = i40e_set_dcb_config(hw);
10516                         if (ret) {
10517                                 PMD_INIT_LOG(ERR,
					"Default DCB config failed, err = %d, aq_err = %d.",
10519                                         ret, hw->aq.asq_last_status);
10520                                 return -ENOSYS;
10521                         }
10522                 } else {
10523                         PMD_INIT_LOG(ERR,
				"DCB initialization in FW failed, err = %d, aq_err = %d.",
10525                                 ret, hw->aq.asq_last_status);
10526                         return -ENOTSUP;
10527                 }
10528         } else {
10529                 ret = i40e_aq_start_lldp(hw, NULL);
10530                 if (ret != I40E_SUCCESS)
10531                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
10532
10533                 ret = i40e_init_dcb(hw);
10534                 if (!ret) {
10535                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
10536                                 PMD_INIT_LOG(ERR,
10537                                         "HW doesn't support DCBX offload.");
10538                                 return -ENOTSUP;
10539                         }
10540                 } else {
10541                         PMD_INIT_LOG(ERR,
10542                                 "DCBX configuration failed, err = %d, aq_err = %d.",
10543                                 ret, hw->aq.asq_last_status);
10544                         return -ENOTSUP;
10545                 }
10546         }
10547         return 0;
10548 }
10549
10550 /*
10551  * i40e_dcb_setup - setup dcb related config
10552  * @dev: device being configured
10553  *
10554  * Returns 0 on success, negative value on failure
10555  */
10556 static int
10557 i40e_dcb_setup(struct rte_eth_dev *dev)
10558 {
10559         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10560         struct i40e_dcbx_config dcb_cfg;
10561         uint8_t tc_map = 0;
10562         int ret = 0;
10563
10564         if ((pf->flags & I40E_FLAG_DCB) == 0) {
10565                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10566                 return -ENOTSUP;
10567         }
10568
10569         if (pf->vf_num != 0)
		PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDq VSIs.");
10571
10572         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
10573         if (ret) {
10574                 PMD_INIT_LOG(ERR, "invalid dcb config");
10575                 return -EINVAL;
10576         }
10577         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
10578         if (ret) {
		PMD_INIT_LOG(ERR, "DCB software configuration failed");
10580                 return -ENOSYS;
10581         }
10582
10583         return 0;
10584 }
10585
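/* Reports DCB state through the rte_eth_dev_get_dcb_info() ethdev API.
 * A minimal caller sketch (port_id is assumed to be a DCB-configured
 * port):
 *
 *	struct rte_eth_dcb_info info;
 *
 *	if (rte_eth_dev_get_dcb_info(port_id, &info) == 0)
 *		printf("nb_tcs = %u\n", info.nb_tcs);
 */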
10586 static int
10587 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
10588                       struct rte_eth_dcb_info *dcb_info)
10589 {
10590         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10591         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10592         struct i40e_vsi *vsi = pf->main_vsi;
10593         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
10594         uint16_t bsf, tc_mapping;
10595         int i, j = 0;
10596
10597         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
10598                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
10599         else
10600                 dcb_info->nb_tcs = 1;
10601         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10602                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
10603         for (i = 0; i < dcb_info->nb_tcs; i++)
10604                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
10605
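	/* Each TC's entry in vsi->info.tc_mapping packs the queue base in
	 * the QUE_OFFSET field and the queue count as a power-of-two
	 * exponent in the QUE_NUMBER field, hence the "1 << bsf" below.
	 */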
10606         /* get queue mapping if vmdq is disabled */
10607         if (!pf->nb_cfg_vmdq_vsi) {
10608                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10609                         if (!(vsi->enabled_tc & (1 << i)))
10610                                 continue;
10611                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
10612                         dcb_info->tc_queue.tc_rxq[j][i].base =
10613                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
10614                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
10615                         dcb_info->tc_queue.tc_txq[j][i].base =
10616                                 dcb_info->tc_queue.tc_rxq[j][i].base;
10617                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
10618                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
10619                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
10620                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
10621                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
10622                 }
10623                 return 0;
10624         }
10625
10626         /* get queue mapping if vmdq is enabled */
10627         do {
10628                 vsi = pf->vmdq[j].vsi;
10629                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10630                         if (!(vsi->enabled_tc & (1 << i)))
10631                                 continue;
10632                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
10633                         dcb_info->tc_queue.tc_rxq[j][i].base =
10634                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
10635                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
10636                         dcb_info->tc_queue.tc_txq[j][i].base =
10637                                 dcb_info->tc_queue.tc_rxq[j][i].base;
10638                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
10639                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
10640                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
10641                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
10642                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
10643                 }
10644                 j++;
10645         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
10646         return 0;
10647 }
10648
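/* Backs the rte_eth_dev_rx_intr_enable() ethdev API. A typical caller
 * sketch, assuming the port was configured with intr_conf.rxq = 1:
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	... sleep until rte_epoll_wait() reports the queue event ...
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */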
10649 static int
10650 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
10651 {
10652         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10653         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
10654         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10655         uint16_t interval =
10656                 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
10657         uint16_t msix_intr;
10658
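	/* The misc vector shares I40E_PFINT_DYN_CTL0, while queue vectors
	 * start at I40E_RX_VEC_START and use the per-vector
	 * I40E_PFINT_DYN_CTLN registers. Either write enables the
	 * interrupt, clears any pending PBA bit and programs the ITR
	 * interval.
	 */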
10659         msix_intr = intr_handle->intr_vec[queue_id];
10660         if (msix_intr == I40E_MISC_VEC_ID)
10661                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
10662                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
10663                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
10664                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
10665                                (interval <<
10666                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
10667         else
10668                 I40E_WRITE_REG(hw,
10669                                I40E_PFINT_DYN_CTLN(msix_intr -
10670                                                    I40E_RX_VEC_START),
10671                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
10672                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
10673                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
10674                                (interval <<
10675                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
10676
10677         I40E_WRITE_FLUSH(hw);
10678         rte_intr_enable(&pci_dev->intr_handle);
10679
10680         return 0;
10681 }
10682
10683 static int
10684 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
10685 {
10686         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10687         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
10688         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10689         uint16_t msix_intr;
10690
10691         msix_intr = intr_handle->intr_vec[queue_id];
10692         if (msix_intr == I40E_MISC_VEC_ID)
10693                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
10694         else
10695                 I40E_WRITE_REG(hw,
10696                                I40E_PFINT_DYN_CTLN(msix_intr -
10697                                                    I40E_RX_VEC_START),
10698                                0);
10699         I40E_WRITE_FLUSH(hw);
10700
10701         return 0;
10702 }
10703
10704 static int i40e_get_regs(struct rte_eth_dev *dev,
10705                          struct rte_dev_reg_info *regs)
10706 {
10707         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10708         uint32_t *ptr_data = regs->data;
10709         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
10710         const struct i40e_reg_info *reg_info;
10711
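	/* A NULL data pointer is a size query: report the snapshot length
	 * and register width so the caller can allocate a buffer and call
	 * again.
	 */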
10712         if (ptr_data == NULL) {
10713                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
10714                 regs->width = sizeof(uint32_t);
10715                 return 0;
10716         }
10717
10718         /* The first few registers have to be read using AQ operations */
10719         reg_idx = 0;
10720         while (i40e_regs_adminq[reg_idx].name) {
10721                 reg_info = &i40e_regs_adminq[reg_idx++];
10722                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
10723                         for (arr_idx2 = 0;
10724                                         arr_idx2 <= reg_info->count2;
10725                                         arr_idx2++) {
10726                                 reg_offset = arr_idx * reg_info->stride1 +
10727                                         arr_idx2 * reg_info->stride2;
10728                                 reg_offset += reg_info->base_addr;
10729                                 ptr_data[reg_offset >> 2] =
10730                                         i40e_read_rx_ctl(hw, reg_offset);
10731                         }
10732         }
10733
10734         /* The remaining registers can be read using primitives */
10735         reg_idx = 0;
10736         while (i40e_regs_others[reg_idx].name) {
10737                 reg_info = &i40e_regs_others[reg_idx++];
10738                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
10739                         for (arr_idx2 = 0;
10740                                         arr_idx2 <= reg_info->count2;
10741                                         arr_idx2++) {
10742                                 reg_offset = arr_idx * reg_info->stride1 +
10743                                         arr_idx2 * reg_info->stride2;
10744                                 reg_offset += reg_info->base_addr;
10745                                 ptr_data[reg_offset >> 2] =
10746                                         I40E_READ_REG(hw, reg_offset);
10747                         }
10748         }
10749
10750         return 0;
10751 }
10752
10753 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
10754 {
10755         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10756
10757         /* Convert word count to byte count */
10758         return hw->nvm.sr_size << 1;
10759 }
10760
10761 static int i40e_get_eeprom(struct rte_eth_dev *dev,
10762                            struct rte_dev_eeprom_info *eeprom)
10763 {
10764         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10765         uint16_t *data = eeprom->data;
10766         uint16_t offset, length, cnt_words;
10767         int ret_code;
10768
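	/* The NVM shadow RAM is addressed in 16-bit words; convert the byte
	 * offset and length supplied by the ethdev API into word units.
	 */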
10769         offset = eeprom->offset >> 1;
10770         length = eeprom->length >> 1;
10771         cnt_words = length;
10772
10773         if (offset > hw->nvm.sr_size ||
10774                 offset + length > hw->nvm.sr_size) {
10775                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
10776                 return -EINVAL;
10777         }
10778
10779         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
10780
10781         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
10782         if (ret_code != I40E_SUCCESS || cnt_words != length) {
10783                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
10784                 return -EIO;
10785         }
10786
10787         return 0;
10788 }
10789
10790 static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
10791                                       struct ether_addr *mac_addr)
10792 {
10793         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10794
10795         if (!is_valid_assigned_ether_addr(mac_addr)) {
10796                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
10797                 return;
10798         }
10799
10800         /* Flags: 0x3 updates port address */
10801         i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
10802 }
10803
10804 static int
10805 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
10806 {
10807         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10808         struct rte_eth_dev_data *dev_data = pf->dev_data;
10809         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
10810         int ret = 0;
10811
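	/* This callback backs the rte_eth_dev_set_mtu() ethdev API.
	 * frame_size is the resulting maximum frame on the wire: the
	 * requested MTU plus I40E_ETH_OVERHEAD for the Ethernet header,
	 * CRC and VLAN tag room.
	 */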
10812         /* check if mtu is within the allowed range */
10813         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
10814                 return -EINVAL;
10815
	/* Changing the MTU is forbidden while the port is started */
10817         if (dev_data->dev_started) {
10818                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
10819                             dev_data->port_id);
10820                 return -EBUSY;
10821         }
10822
10823         if (frame_size > ETHER_MAX_LEN)
10824                 dev_data->dev_conf.rxmode.jumbo_frame = 1;
10825         else
10826                 dev_data->dev_conf.rxmode.jumbo_frame = 0;
10827
10828         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
10829
10830         return ret;
10831 }
10832
10833 /* Restore ethertype filter */
10834 static void
10835 i40e_ethertype_filter_restore(struct i40e_pf *pf)
10836 {
10837         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10838         struct i40e_ethertype_filter_list
10839                 *ethertype_list = &pf->ethertype.ethertype_list;
10840         struct i40e_ethertype_filter *f;
10841         struct i40e_control_filter_stats stats;
	uint16_t flags;

	/* Zero the stats so the log below is valid even for an empty list */
	memset(&stats, 0, sizeof(stats));
	TAILQ_FOREACH(f, ethertype_list, rules) {
10845                 flags = 0;
10846                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
10847                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
10848                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
10849                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
10850                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
10851
10852                 memset(&stats, 0, sizeof(stats));
10853                 i40e_aq_add_rem_control_packet_filter(hw,
10854                                             f->input.mac_addr.addr_bytes,
10855                                             f->input.ether_type,
10856                                             flags, pf->main_vsi->seid,
10857                                             f->queue, 1, &stats, NULL);
10858         }
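	/* Note: the statistics reported below are those returned by the
	 * last AQ command issued in the loop above.
	 */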
10859         PMD_DRV_LOG(INFO, "Ethertype filter:"
10860                     " mac_etype_used = %u, etype_used = %u,"
10861                     " mac_etype_free = %u, etype_free = %u",
10862                     stats.mac_etype_used, stats.etype_used,
10863                     stats.mac_etype_free, stats.etype_free);
10864 }
10865
10866 /* Restore tunnel filter */
10867 static void
10868 i40e_tunnel_filter_restore(struct i40e_pf *pf)
10869 {
10870         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10871         struct i40e_vsi *vsi;
10872         struct i40e_pf_vf *vf;
10873         struct i40e_tunnel_filter_list
10874                 *tunnel_list = &pf->tunnel.tunnel_list;
10875         struct i40e_tunnel_filter *f;
10876         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
10877         bool big_buffer = 0;
10878
10879         TAILQ_FOREACH(f, tunnel_list, rules) {
10880                 if (!f->is_to_vf)
10881                         vsi = pf->main_vsi;
10882                 else {
10883                         vf = &pf->vfs[f->vf_id];
10884                         vsi = vf->vsi;
10885                 }
10886                 memset(&cld_filter, 0, sizeof(cld_filter));
10887                 ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
10888                         (struct ether_addr *)&cld_filter.element.outer_mac);
10889                 ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
10890                         (struct ether_addr *)&cld_filter.element.inner_mac);
10891                 cld_filter.element.inner_vlan = f->input.inner_vlan;
10892                 cld_filter.element.flags = f->input.flags;
10893                 cld_filter.element.tenant_id = f->input.tenant_id;
10894                 cld_filter.element.queue_number = f->queue;
10895                 rte_memcpy(cld_filter.general_fields,
10896                            f->input.general_fields,
10897                            sizeof(f->input.general_fields));
10898
		/* Evaluate per filter so a previous iteration cannot leave
		 * big_buffer set: only the 0x10/0x11/0x12 customized filter
		 * types need the big-buffer AQ command.
		 */
		big_buffer = ((f->input.flags &
		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
		    ((f->input.flags &
		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
		    ((f->input.flags &
		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
		     I40E_AQC_ADD_CLOUD_FILTER_0X10);
10909
10910                 if (big_buffer)
10911                         i40e_aq_add_cloud_filters_big_buffer(hw,
10912                                              vsi->seid, &cld_filter, 1);
10913                 else
10914                         i40e_aq_add_cloud_filters(hw, vsi->seid,
10915                                                   &cld_filter.element, 1);
10916         }
10917 }
10918
10919 static void
10920 i40e_filter_restore(struct i40e_pf *pf)
10921 {
10922         i40e_ethertype_filter_restore(pf);
10923         i40e_tunnel_filter_restore(pf);
10924         i40e_fdir_filter_restore(pf);
10925 }
10926
10927 static bool
10928 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
10929 {
10930         if (strcmp(dev->device->driver->name, drv->driver.name))
10931                 return false;
10932
10933         return true;
10934 }
10935
10936 bool
10937 is_i40e_supported(struct rte_eth_dev *dev)
10938 {
10939         return is_device_supported(dev, &rte_i40e_pmd);
10940 }
10941
10942 struct i40e_customized_pctype*
10943 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
10944 {
10945         int i;
10946
10947         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
10948                 if (pf->customized_pctype[i].index == index)
10949                         return &pf->customized_pctype[i];
10950         }
10951         return NULL;
10952 }
10953
10954 static int
10955 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
10956                               uint32_t pkg_size, uint32_t proto_num,
10957                               struct rte_pmd_i40e_proto_info *proto)
10958 {
10959         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10960         uint32_t pctype_num;
10961         struct rte_pmd_i40e_ptype_info *pctype;
10962         uint32_t buff_size;
10963         struct i40e_customized_pctype *new_pctype = NULL;
10964         uint8_t proto_id;
10965         uint8_t pctype_value;
10966         char name[64];
10967         uint32_t i, j, n;
10968         int ret;
10969
10970         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
10971                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
10972                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
10973         if (ret) {
10974                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
10975                 return -1;
10976         }
10977         if (!pctype_num) {
10978                 PMD_DRV_LOG(INFO, "No new pctype added");
10979                 return -1;
10980         }
10981
10982         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
10983         pctype = rte_zmalloc("new_pctype", buff_size, 0);
10984         if (!pctype) {
10985                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
10986                 return -1;
10987         }
10988         /* get information about new pctype list */
10989         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
10990                                         (uint8_t *)pctype, buff_size,
10991                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
10992         if (ret) {
10993                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
10994                 rte_free(pctype);
10995                 return -1;
10996         }
10997
10998         /* Update customized pctype. */
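	/* A pctype's name is rebuilt by joining its member protocol names
	 * with '_', e.g. protocols GTPU and IPV4 yield "GTPU_IPV4", and is
	 * then matched against the customized pctypes the driver knows.
	 */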
10999         for (i = 0; i < pctype_num; i++) {
11000                 pctype_value = pctype[i].ptype_id;
11001                 memset(name, 0, sizeof(name));
11002                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11003                         proto_id = pctype[i].protocols[j];
11004                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11005                                 continue;
11006                         for (n = 0; n < proto_num; n++) {
11007                                 if (proto[n].proto_id != proto_id)
11008                                         continue;
11009                                 strcat(name, proto[n].name);
11010                                 strcat(name, "_");
11011                                 break;
11012                         }
11013                 }
		/* Strip the trailing '_'; skip if no protocol name matched */
		if (name[0] != '\0')
			name[strlen(name) - 1] = '\0';
11015                 if (!strcmp(name, "GTPC"))
11016                         new_pctype =
11017                                 i40e_find_customized_pctype(pf,
11018                                                       I40E_CUSTOMIZED_GTPC);
11019                 else if (!strcmp(name, "GTPU_IPV4"))
11020                         new_pctype =
11021                                 i40e_find_customized_pctype(pf,
11022                                                    I40E_CUSTOMIZED_GTPU_IPV4);
11023                 else if (!strcmp(name, "GTPU_IPV6"))
11024                         new_pctype =
11025                                 i40e_find_customized_pctype(pf,
11026                                                    I40E_CUSTOMIZED_GTPU_IPV6);
11027                 else if (!strcmp(name, "GTPU"))
11028                         new_pctype =
11029                                 i40e_find_customized_pctype(pf,
11030                                                       I40E_CUSTOMIZED_GTPU);
11031                 if (new_pctype) {
11032                         new_pctype->pctype = pctype_value;
11033                         new_pctype->valid = true;
11034                 }
11035         }
11036
11037         rte_free(pctype);
11038         return 0;
11039 }
11040
11041 static int
11042 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
11043                                uint32_t pkg_size, uint32_t proto_num,
11044                                struct rte_pmd_i40e_proto_info *proto)
11045 {
11046         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
11047         uint16_t port_id = dev->data->port_id;
11048         uint32_t ptype_num;
11049         struct rte_pmd_i40e_ptype_info *ptype;
11050         uint32_t buff_size;
11051         uint8_t proto_id;
11052         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
11053         uint32_t i, j, n;
11054         bool in_tunnel;
11055         int ret;
11056
11057         /* get information about new ptype num */
11058         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11059                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
11060                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
11061         if (ret) {
11062                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
11063                 return ret;
11064         }
11065         if (!ptype_num) {
11066                 PMD_DRV_LOG(INFO, "No new ptype added");
11067                 return -1;
11068         }
11069
11070         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11071         ptype = rte_zmalloc("new_ptype", buff_size, 0);
11072         if (!ptype) {
11073                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11074                 return -1;
11075         }
11076
11077         /* get information about new ptype list */
11078         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11079                                         (uint8_t *)ptype, buff_size,
11080                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
11081         if (ret) {
11082                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
11083                 rte_free(ptype);
11084                 return ret;
11085         }
11086
11087         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
11088         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
11089         if (!ptype_mapping) {
11090                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11091                 rte_free(ptype);
11092                 return -1;
11093         }
11094
11095         /* Update ptype mapping table. */
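	/* Walk each new ptype's protocol list in order. Outer headers such
	 * as OIPV4/OIPV6 and the GTP/GRENAT/L2TP tunnel headers set
	 * in_tunnel, so any L3/L4 protocol seen after them is mapped to the
	 * RTE_PTYPE_INNER_* variants.
	 */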
11096         for (i = 0; i < ptype_num; i++) {
11097                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
11098                 ptype_mapping[i].sw_ptype = 0;
11099                 in_tunnel = false;
11100                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11101                         proto_id = ptype[i].protocols[j];
11102                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11103                                 continue;
11104                         for (n = 0; n < proto_num; n++) {
11105                                 if (proto[n].proto_id != proto_id)
11106                                         continue;
				/* Bounded, NUL-terminated copy of the
				 * protocol name
				 */
				snprintf(name, sizeof(name), "%s",
					 proto[n].name);
11109                                 if (!strncmp(name, "PPPOE", 5))
11110                                         ptype_mapping[i].sw_ptype |=
11111                                                 RTE_PTYPE_L2_ETHER_PPPOE;
11112                                 else if (!strncmp(name, "OIPV4", 5)) {
11113                                         ptype_mapping[i].sw_ptype |=
11114                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11115                                         in_tunnel = true;
11116                                 } else if (!strncmp(name, "IPV4", 4) &&
11117                                            !in_tunnel)
11118                                         ptype_mapping[i].sw_ptype |=
11119                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11120                                 else if (!strncmp(name, "IPV4FRAG", 8) &&
11121                                          in_tunnel) {
11122                                         ptype_mapping[i].sw_ptype |=
11123                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11124                                         ptype_mapping[i].sw_ptype |=
11125                                                 RTE_PTYPE_INNER_L4_FRAG;
11126                                 } else if (!strncmp(name, "IPV4", 4) &&
11127                                            in_tunnel)
11128                                         ptype_mapping[i].sw_ptype |=
11129                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11130                                 else if (!strncmp(name, "OIPV6", 5)) {
11131                                         ptype_mapping[i].sw_ptype |=
11132                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11133                                         in_tunnel = true;
11134                                 } else if (!strncmp(name, "IPV6", 4) &&
11135                                            !in_tunnel)
11136                                         ptype_mapping[i].sw_ptype |=
11137                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11138                                 else if (!strncmp(name, "IPV6FRAG", 8) &&
11139                                          in_tunnel) {
11140                                         ptype_mapping[i].sw_ptype |=
11141                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11142                                         ptype_mapping[i].sw_ptype |=
11143                                                 RTE_PTYPE_INNER_L4_FRAG;
11144                                 } else if (!strncmp(name, "IPV6", 4) &&
11145                                            in_tunnel)
11146                                         ptype_mapping[i].sw_ptype |=
11147                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11148                                 else if (!strncmp(name, "UDP", 3) && !in_tunnel)
11149                                         ptype_mapping[i].sw_ptype |=
11150                                                 RTE_PTYPE_L4_UDP;
11151                                 else if (!strncmp(name, "UDP", 3) && in_tunnel)
11152                                         ptype_mapping[i].sw_ptype |=
11153                                                 RTE_PTYPE_INNER_L4_UDP;
11154                                 else if (!strncmp(name, "TCP", 3) && !in_tunnel)
11155                                         ptype_mapping[i].sw_ptype |=
11156                                                 RTE_PTYPE_L4_TCP;
11157                                 else if (!strncmp(name, "TCP", 3) && in_tunnel)
11158                                         ptype_mapping[i].sw_ptype |=
11159                                                 RTE_PTYPE_INNER_L4_TCP;
11160                                 else if (!strncmp(name, "SCTP", 4) &&
11161                                          !in_tunnel)
11162                                         ptype_mapping[i].sw_ptype |=
11163                                                 RTE_PTYPE_L4_SCTP;
11164                                 else if (!strncmp(name, "SCTP", 4) && in_tunnel)
11165                                         ptype_mapping[i].sw_ptype |=
11166                                                 RTE_PTYPE_INNER_L4_SCTP;
11167                                 else if ((!strncmp(name, "ICMP", 4) ||
11168                                           !strncmp(name, "ICMPV6", 6)) &&
11169                                          !in_tunnel)
11170                                         ptype_mapping[i].sw_ptype |=
11171                                                 RTE_PTYPE_L4_ICMP;
11172                                 else if ((!strncmp(name, "ICMP", 4) ||
11173                                           !strncmp(name, "ICMPV6", 6)) &&
11174                                          in_tunnel)
11175                                         ptype_mapping[i].sw_ptype |=
11176                                                 RTE_PTYPE_INNER_L4_ICMP;
11177                                 else if (!strncmp(name, "GTPC", 4)) {
11178                                         ptype_mapping[i].sw_ptype |=
11179                                                 RTE_PTYPE_TUNNEL_GTPC;
11180                                         in_tunnel = true;
11181                                 } else if (!strncmp(name, "GTPU", 4)) {
11182                                         ptype_mapping[i].sw_ptype |=
11183                                                 RTE_PTYPE_TUNNEL_GTPU;
11184                                         in_tunnel = true;
11185                                 } else if (!strncmp(name, "GRENAT", 6)) {
11186                                         ptype_mapping[i].sw_ptype |=
11187                                                 RTE_PTYPE_TUNNEL_GRENAT;
11188                                         in_tunnel = true;
11189                                 } else if (!strncmp(name, "L2TPv2CTL", 9)) {
11190                                         ptype_mapping[i].sw_ptype |=
11191                                                 RTE_PTYPE_TUNNEL_L2TP;
11192                                         in_tunnel = true;
11193                                 }
11194
11195                                 break;
11196                         }
11197                 }
11198         }
11199
11200         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
11201                                                 ptype_num, 0);
11202         if (ret)
11203                 PMD_DRV_LOG(ERR, "Failed to update mapping table.");
11204
11205         rte_free(ptype_mapping);
11206         rte_free(ptype);
11207         return ret;
11208 }
11209
11210 void
11211 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
11212                               uint32_t pkg_size)
11213 {
11214         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11215         uint32_t proto_num;
11216         struct rte_pmd_i40e_proto_info *proto;
11217         uint32_t buff_size;
11218         uint32_t i;
11219         int ret;
11220
11221         /* get information about protocol number */
11222         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11223                                        (uint8_t *)&proto_num, sizeof(proto_num),
11224                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
11225         if (ret) {
11226                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
11227                 return;
11228         }
11229         if (!proto_num) {
11230                 PMD_DRV_LOG(INFO, "No new protocol added");
11231                 return;
11232         }
11233
11234         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
11235         proto = rte_zmalloc("new_proto", buff_size, 0);
11236         if (!proto) {
11237                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11238                 return;
11239         }
11240
11241         /* get information about protocol list */
11242         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11243                                         (uint8_t *)proto, buff_size,
11244                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
11245         if (ret) {
11246                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
11247                 rte_free(proto);
11248                 return;
11249         }
11250
11251         /* Check if GTP is supported. */
11252         for (i = 0; i < proto_num; i++) {
11253                 if (!strncmp(proto[i].name, "GTP", 3)) {
11254                         pf->gtp_support = true;
11255                         break;
11256                 }
11257         }
11258
11259         /* Update customized pctype info */
11260         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
11261                                             proto_num, proto);
11262         if (ret)
11263                 PMD_DRV_LOG(INFO, "No pctype is updated.");
11264
11265         /* Update customized ptype info */
11266         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
11267                                            proto_num, proto);
11268         if (ret)
11269                 PMD_DRV_LOG(INFO, "No ptype is updated.");
11270
11271         rte_free(proto);
11272 }
11273
11274 /* Create a QinQ cloud filter
11275  *
11276  * The Fortville NIC has limited resources for tunnel filters,
11277  * so we can only reuse existing filters.
11278  *
11279  * In step 1 we define which Field Vector fields can be used for
11280  * filter types.
11281  * As we do not have the inner tag defined as a field,
11282  * we have to define it first, by reusing one of L1 entries.
11283  *
 * In step 2 we replace one of the existing filter types with
 * a new one for QinQ.
 * As we reuse an L1 entry and replace an L2 entry, some of the default
 * filter types will disappear; which ones depends on the L1 and L2
 * entries we reuse.
11288  *
11289  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
11290  *
11291  * 1.   Create L1 filter of outer vlan (12b) which will be in use
11292  *              later when we define the cloud filter.
11293  *      a.      Valid_flags.replace_cloud = 0
11294  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
11295  *      c.      New_filter = 0x10
11296  *      d.      TR bit = 0xff (optional, not used here)
11297  *      e.      Buffer – 2 entries:
11298  *              i.      Byte 0 = 8 (outer vlan FV index).
11299  *                      Byte 1 = 0 (rsv)
11300  *                      Byte 2-3 = 0x0fff
11301  *              ii.     Byte 0 = 37 (inner vlan FV index).
 *                      Byte 1 = 0 (rsv)
11303  *                      Byte 2-3 = 0x0fff
11304  *
11305  * Step 2:
11306  * 2.   Create cloud filter using two L1 filters entries: stag and
11307  *              new filter(outer vlan+ inner vlan)
11308  *      a.      Valid_flags.replace_cloud = 1
11309  *      b.      Old_filter = 1 (instead of outer IP)
11310  *      c.      New_filter = 0x10
11311  *      d.      Buffer – 2 entries:
11312  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
11313  *                      Byte 1-3 = 0 (rsv)
11314  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
11315  *                      Byte 9-11 = 0 (rsv)
11316  */
11317 static int
11318 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
11319 {
11320         int ret = -ENOTSUP;
11321         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
11322         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
11323         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11324
11325         /* Init */
11326         memset(&filter_replace, 0,
11327                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
11328         memset(&filter_replace_buf, 0,
11329                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
11330
11331         /* create L1 filter */
11332         filter_replace.old_filter_type =
11333                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
11334         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
11335         filter_replace.tr_bit = 0;
11336
11337         /* Prepare the buffer, 2 entries */
11338         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
11339         filter_replace_buf.data[0] |=
11340                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
11341         /* Field Vector 12b mask */
11342         filter_replace_buf.data[2] = 0xff;
11343         filter_replace_buf.data[3] = 0x0f;
11344         filter_replace_buf.data[4] =
11345                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
11346         filter_replace_buf.data[4] |=
11347                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
11348         /* Field Vector 12b mask */
11349         filter_replace_buf.data[6] = 0xff;
11350         filter_replace_buf.data[7] = 0x0f;
11351         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
11352                         &filter_replace_buf);
11353         if (ret != I40E_SUCCESS)
11354                 return ret;
11355
11356         /* Apply the second L2 cloud filter */
11357         memset(&filter_replace, 0,
11358                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
11359         memset(&filter_replace_buf, 0,
11360                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
11361
	/* Create the L2 filter; its input will be the L1 filter created above */
11363         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
11364         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
11365         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
11366
11367         /* Prepare the buffer, 2 entries */
11368         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
11369         filter_replace_buf.data[0] |=
11370                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
11371         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
11372         filter_replace_buf.data[4] |=
11373                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
11374         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
11375                         &filter_replace_buf);
11376         return ret;
11377 }
11378
11379 RTE_INIT(i40e_init_log);
11380 static void
11381 i40e_init_log(void)
11382 {
11383         i40e_logtype_init = rte_log_register("pmd.i40e.init");
11384         if (i40e_logtype_init >= 0)
11385                 rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
11386         i40e_logtype_driver = rte_log_register("pmd.i40e.driver");
11387         if (i40e_logtype_driver >= 0)
11388                 rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
11389 }