net/i40e: fix VSI MAC filter on primary address change
dpdk.git: drivers/net/i40e/i40e_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "base/i40e_diag.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"

#define I40E_CLEAR_PXE_WAIT_MS     200
/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
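/* Note: 0xF2000 bytes >> I40E_KILOSHIFT = 968 KB, i.e. the whole Rx packet
 * buffer (I40E_RXPBSIZE) expressed in kilobytes, so both water marks
 * default to the full buffer size.
 */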

/* Receive average packet size in bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))
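/* Bitmask, indexed by RTE_ETH_FLOW_*, of the flow types this driver
 * accepts in its RSS hash and input-set configuration paths.
 */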

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL

/**
 * Below are values for writing to un-exposed registers, as suggested
 * by silicon experts.
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words of the flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
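/* PCIe Extended Tag widens the transaction tag field from 5 to 8 bits;
 * the offsets above locate the capability and control words used to
 * query and toggle it on this device.
 */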

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static int  i40e_dev_reset(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned limit);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
					    uint16_t queue_id,
					    uint8_t stat_idx,
					    uint8_t is_rx);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
				char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id,
				int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type,
			      uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue,
				      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr,
			    uint32_t index,
			    uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
			       uint32_t hireg,
			       uint32_t loreg,
			       bool offset_loaded,
			       uint64_t *offset,
			       uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
				uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
						struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
				enum rte_filter_type filter_type,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
				  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
						     uint16_t seid,
						     uint16_t rule_type,
						     uint16_t *entries,
						     uint16_t count,
						     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp,
					   uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
				   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
				    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					  uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
			 struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *eeprom);

static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
				      struct ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
	const struct rte_eth_ethertype_filter *input,
	struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
	struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
	struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
				struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

int i40e_logtype_init;
int i40e_logtype_driver;

static const struct rte_pci_id pci_id_i40e_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.dev_reset                    = i40e_dev_reset,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.xstats_get                   = i40e_dev_xstats_get,
	.xstats_get_names             = i40e_dev_xstats_get_names,
	.stats_reset                  = i40e_dev_stats_reset,
	.xstats_reset                 = i40e_dev_stats_reset,
	.queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
	.fw_version_get               = i40e_fw_version_get,
	.dev_infos_get                = i40e_dev_info_get,
	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.rx_queue_count               = i40e_dev_rx_queue_count,
	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
	.rx_descriptor_status         = i40e_dev_rx_descriptor_status,
	.tx_descriptor_status         = i40e_dev_tx_descriptor_status,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_get                = i40e_flow_ctrl_get,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
	.filter_ctrl                  = i40e_dev_filter_ctrl,
	.rxq_info_get                 = i40e_rxq_info_get,
	.txq_info_get                 = i40e_txq_info_get,
	.mirror_rule_set              = i40e_mirror_rule_set,
	.mirror_rule_reset            = i40e_mirror_rule_reset,
	.timesync_enable              = i40e_timesync_enable,
	.timesync_disable             = i40e_timesync_disable,
	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
	.get_dcb_info                 = i40e_dev_get_dcb_info,
	.timesync_adjust_time         = i40e_timesync_adjust_time,
	.timesync_read_time           = i40e_timesync_read_time,
	.timesync_write_time          = i40e_timesync_write_time,
	.get_reg                      = i40e_get_regs,
	.get_eeprom_length            = i40e_get_eeprom_length,
	.get_eeprom                   = i40e_get_eeprom,
	.mac_addr_set                 = i40e_set_default_mac_addr,
	.mtu_set                      = i40e_dev_mtu_set,
	.tm_ops_get                   = i40e_tm_ops_get,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
		sizeof(rte_i40e_stats_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
		mac_remote_faults)},
	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
		rx_length_errors)},
	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
		mac_short_packet_dropped)},
	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_big)},
	{"rx_flow_director_atr_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
	{"rx_flow_director_sb_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		tx_lpi_status)},
	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		rx_lpi_status)},
	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		tx_lpi_count)},
	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
		sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_rx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
		sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_tx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_tx)},
	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
		sizeof(rte_i40e_txq_prio_strings[0]))
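/* The *_NB_* counts above size the xstats arrays exposed through
 * i40e_dev_xstats_get() and i40e_dev_xstats_get_names().
 */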

static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct i40e_adapter), eth_i40e_dev_init);
}

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
	.id_table = pci_id_i40e_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_i40e_pci_probe,
	.remove = eth_i40e_pci_remove,
};

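/* Helpers to read and write dev->data->dev_link atomically: a single
 * 64-bit compare-and-set swaps the whole struct in one shot, assuming
 * struct rte_eth_link fits in 64 bits.
 */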
static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				      struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
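/* The three macros above register the PMD with the PCI bus, export its
 * device ID table, and declare the kernel modules the device may be
 * bound to.
 */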

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
	 * Initialize registers for parsing the packet type of QinQ.
	 * This should be removed from code once a proper
	 * configuration API is added, to avoid configuration conflicts
	 * between ports of the same device.
	 */
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
	int ret;

	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
				I40E_FLOW_CONTROL_ETHERTYPE, flags,
				pf->main_vsi_seid, 0,
				TRUE, NULL, NULL);
	if (ret)
		PMD_INIT_LOG(ERR,
			"Failed to add filter to drop flow control frames from VSIs.");
}

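/* Parse the floating_veb_list devarg: a ';'-separated list of VF indices
 * or '-' ranges, e.g. floating_veb_list=1;3-5 marks VFs 1, 3, 4 and 5
 * to be attached to the floating VEB.
 */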
static int
floating_veb_list_handler(__rte_unused const char *key,
			  const char *floating_veb_value,
			  void *opaque)
{
	int idx = 0;
	unsigned int count = 0;
	char *end = NULL;
	int min, max;
	bool *vf_floating_veb = opaque;

	while (isblank(*floating_veb_value))
		floating_veb_value++;

	/* Reset floating VEB configuration for VFs */
	for (idx = 0; idx < I40E_MAX_VF; idx++)
		vf_floating_veb[idx] = false;

	min = I40E_MAX_VF;
	do {
		while (isblank(*floating_veb_value))
			floating_veb_value++;
		if (*floating_veb_value == '\0')
			return -1;
		errno = 0;
		idx = strtoul(floating_veb_value, &end, 10);
		if (errno || end == NULL)
			return -1;
		while (isblank(*end))
			end++;
		if (*end == '-') {
			min = idx;
		} else if ((*end == ';') || (*end == '\0')) {
			max = idx;
			if (min == I40E_MAX_VF)
				min = idx;
			if (max >= I40E_MAX_VF)
				max = I40E_MAX_VF - 1;
			for (idx = min; idx <= max; idx++) {
				vf_floating_veb[idx] = true;
				count++;
			}
			min = I40E_MAX_VF;
		} else {
			return -1;
		}
		floating_veb_value = end + 1;
	} while (*end != '\0');

	if (count == 0)
		return -1;

	return 0;
}

static void
config_vf_floating_veb(struct rte_devargs *devargs,
		       uint16_t floating_veb,
		       bool *vf_floating_veb)
{
	struct rte_kvargs *kvlist;
	int i;
	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

	if (!floating_veb)
		return;
	/* All the VFs attach to the floating VEB by default
	 * when the floating VEB is enabled.
	 */
	for (i = 0; i < I40E_MAX_VF; i++)
		vf_floating_veb[i] = true;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
		rte_kvargs_free(kvlist);
		return;
	}
	/* If the floating_veb_list parameter is given, all the VFs
	 * first attach to the legacy VEB and are then moved to the
	 * floating VEB according to that list.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_list,
			       floating_veb_list_handler,
			       vf_floating_veb) < 0) {
		rte_kvargs_free(kvlist);
		return;
	}
	rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
			    const char *value,
			    __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Floating VEB is enabled when there's key-value:
	 * enable_floating_veb=1
	 */
	if (rte_kvargs_process(kvlist, floating_veb_key,
			       i40e_check_floating_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
		pf->floating_veb =
			is_floating_veb_supported(pci_dev->device.devargs);
		config_vf_floating_veb(pci_dev->device.devargs,
				       pf->floating_veb,
				       pf->floating_veb_list);
	} else {
		pf->floating_veb = false;
	}
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
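/* The S-Tag enable bit above is cleared in eth_i40e_dev_init() when the
 * floating VEB is not in use.
 *
 * The filter-list init helpers below share one pattern: a TAILQ holding
 * the software copies of the rules, plus an rte_hash keyed on the filter
 * input fields, with a flat hash_map array translating hash positions
 * back to rule pointers.
 */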

static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	char ethertype_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters ethertype_hash_params = {
		.name = ethertype_hash_name,
		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
		.key_len = sizeof(struct i40e_ethertype_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize ethertype filter rule list and hash */
	TAILQ_INIT(&ethertype_rule->ethertype_list);
	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
		 "ethertype_%s", dev->device->name);
	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
	if (!ethertype_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
		return -EINVAL;
	}
	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
				       sizeof(struct i40e_ethertype_filter *) *
				       I40E_MAX_ETHERTYPE_FILTER_NUM,
				       0);
	if (!ethertype_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for ethertype hash map!");
		ret = -ENOMEM;
		goto err_ethertype_hash_map_alloc;
	}

	return 0;

err_ethertype_hash_map_alloc:
	rte_hash_free(ethertype_rule->hash_table);

	return ret;
}

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	char tunnel_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters tunnel_hash_params = {
		.name = tunnel_hash_name,
		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
		.key_len = sizeof(struct i40e_tunnel_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize tunnel filter rule list and hash */
	TAILQ_INIT(&tunnel_rule->tunnel_list);
	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
		 "tunnel_%s", dev->device->name);
	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
	if (!tunnel_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
		return -EINVAL;
	}
	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
				    sizeof(struct i40e_tunnel_filter *) *
				    I40E_MAX_TUNNEL_FILTER_NUM,
				    0);
	if (!tunnel_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for tunnel hash map!");
		ret = -ENOMEM;
		goto err_tunnel_hash_map_alloc;
	}

	return 0;

err_tunnel_hash_map_alloc:
	rte_hash_free(tunnel_rule->hash_table);

	return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = I40E_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct i40e_fdir_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize flow director filter rule list and hash */
	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
					  sizeof(struct i40e_fdir_filter *) *
					  I40E_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
i40e_init_customized_info(struct i40e_pf *pf)
{
	int i;

	/* Initialize customized pctype */
	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
		pf->customized_pctype[i].index = i;
		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
		pf->customized_pctype[i].valid = false;
	}

	pf->gtp_support = false;
}

void
i40e_init_queue_region_conf(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_queue_regions *info = &pf->queue_region;
	uint16_t i;

	for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);

	memset(info, 0, sizeof(struct i40e_queue_regions));
}

static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi;
	int ret;
	uint32_t len;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &i40e_eth_dev_ops;
	dev->rx_pkt_burst = i40e_recv_pkts;
	dev->tx_pkt_burst = i40e_xmit_pkts;
	dev->tx_pkt_prepare = i40e_prep_pkts;

	/* For secondary processes, we don't initialise any further as the
	 * primary has already done this work. Only check that we don't need
	 * a different Rx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		i40e_set_rx_function(dev);
		i40e_set_tx_function(dev);
		return 0;
	}
	i40e_set_default_ptype_table(dev);
	i40e_set_default_pctype_table(dev);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	intr_handle = &pci_dev->intr_handle;

	rte_eth_copy_pci_info(dev, pci_dev);

	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;

	hw->back = I40E_PF_TO_ADAPTER(pf);
	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
	if (!hw->hw_addr) {
		PMD_INIT_LOG(ERR,
			"Hardware is not available, as address is NULL");
		return -ENODEV;
	}

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->adapter_stopped = 0;

	/* Make sure all is clean before doing PF reset */
	i40e_clear_hw(hw);

	/* Initialize the hardware */
	i40e_hw_init(dev);

	/* Reset here to make sure all is clean for each PF */
	ret = i40e_pf_reset(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
		return ret;
	}

	/* Initialize the shared code (base driver) */
	ret = i40e_init_shared_code(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
		return ret;
	}

	/*
	 * To work around an NVM issue, initialize the registers
	 * for the QinQ packet type in software.
	 * This should be removed once the issue is fixed in the NVM.
	 */
	i40e_GLQF_reg_init(hw);

	/* Initialize the input set for filters (hash and fd) to default values */
	i40e_filter_input_set_init(pf);

	/* Initialize the parameters for adminq */
	i40e_init_adminq_parameter(hw);
	ret = i40e_init_adminq(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
		return -EIO;
	}
	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
		     ((hw->nvm.version >> 12) & 0xf),
		     ((hw->nvm.version >> 4) & 0xff),
		     (hw->nvm.version & 0xf), hw->nvm.eetrack);

	/* initialise the L3_MAP register */
	ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
				   0x00000028,  NULL);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret);

	/* Need the special FW version to support floating VEB */
	config_floating_veb(dev);
	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);
	i40e_dev_sync_phy_type(hw);

	/*
	 * On X710, performance numbers fall far short of expectations on
	 * recent firmware versions, and the fix may not land in the next
	 * firmware release either, so a workaround in the software driver
	 * is needed. It modifies the initial values of 3 internal-only
	 * registers. Note that the workaround can be removed when the issue
	 * is fixed in firmware in the future.
	 */
	i40e_configure_registers(hw);
1151
1152         /* Get hw capabilities */
1153         ret = i40e_get_cap(hw);
1154         if (ret != I40E_SUCCESS) {
1155                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1156                 goto err_get_capabilities;
1157         }
1158
1159         /* Initialize parameters for PF */
1160         ret = i40e_pf_parameter_init(dev);
1161         if (ret != 0) {
1162                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1163                 goto err_parameter_init;
1164         }
1165
1166         /* Initialize the queue management */
1167         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1168         if (ret < 0) {
1169                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1170                 goto err_qp_pool_init;
1171         }
1172         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1173                                 hw->func_caps.num_msix_vectors - 1);
1174         if (ret < 0) {
1175                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1176                 goto err_msix_pool_init;
1177         }
1178
1179         /* Initialize lan hmc */
1180         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1181                                 hw->func_caps.num_rx_qp, 0, 0);
1182         if (ret != I40E_SUCCESS) {
1183                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1184                 goto err_init_lan_hmc;
1185         }
1186
1187         /* Configure lan hmc */
1188         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1189         if (ret != I40E_SUCCESS) {
1190                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1191                 goto err_configure_lan_hmc;
1192         }
1193
1194         /* Get and check the mac address */
1195         i40e_get_mac_addr(hw, hw->mac.addr);
1196         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1197                 PMD_INIT_LOG(ERR, "mac address is not valid");
1198                 ret = -EIO;
1199                 goto err_get_mac_addr;
1200         }
1201         /* Copy the permanent MAC address */
1202         ether_addr_copy((struct ether_addr *) hw->mac.addr,
1203                         (struct ether_addr *) hw->mac.perm_addr);
1204
1205         /* Disable flow control */
1206         hw->fc.requested_mode = I40E_FC_NONE;
1207         i40e_set_fc(hw, &aq_fail, TRUE);
1208
1209         /* Set the global registers with default ether type value */
1210         ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
1211         if (ret != I40E_SUCCESS) {
1212                 PMD_INIT_LOG(ERR,
1213                         "Failed to set the default outer VLAN ether type");
1214                 goto err_setup_pf_switch;
1215         }
1216
1217         /* PF setup, which includes VSI setup */
1218         ret = i40e_pf_setup(pf);
1219         if (ret) {
1220                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1221                 goto err_setup_pf_switch;
1222         }
1223
1224         /* reset all stats of the device, including pf and main vsi */
1225         i40e_dev_stats_reset(dev);
1226
1227         vsi = pf->main_vsi;
1228
1229         /* Disable double vlan by default */
1230         i40e_vsi_config_double_vlan(vsi, FALSE);
1231
1232         /* Disable S-TAG identification when floating_veb is disabled */
1233         if (!pf->floating_veb) {
1234                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1235                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1236                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1237                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1238                 }
1239         }
1240
1241         if (!vsi->max_macaddrs)
1242                 len = ETHER_ADDR_LEN;
1243         else
1244                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1245
1246         /* Allocate after VSI init so that max_macaddrs is valid */
1247         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1248         if (!dev->data->mac_addrs) {
1249                 PMD_INIT_LOG(ERR,
1250                         "Failed to allocate memory for storing MAC addresses");
1251                 goto err_mac_alloc;
1252         }
1253         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1254                                         &dev->data->mac_addrs[0]);
1255
1256         /* Init dcb to sw mode by default */
1257         ret = i40e_dcb_init_configure(dev, TRUE);
1258         if (ret != I40E_SUCCESS) {
1259                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1260                 pf->flags &= ~I40E_FLAG_DCB;
1261         }
1262         /* Update HW struct after DCB configuration */
1263         i40e_get_cap(hw);
1264
1265         /* initialize pf host driver to setup SRIOV resource if applicable */
1266         i40e_pf_host_init(dev);
1267
1268         /* register callback func to eal lib */
1269         rte_intr_callback_register(intr_handle,
1270                                    i40e_dev_interrupt_handler, dev);
1271
1272         /* configure and enable device interrupt */
1273         i40e_pf_config_irq0(hw, TRUE);
1274         i40e_pf_enable_irq0(hw);
1275
1276         /* enable uio intr after callback register */
1277         rte_intr_enable(intr_handle);
1278
1279         /* By default disable flexible payload in global configuration */
1280         i40e_flex_payload_reg_set_default(hw);
1281
1282         /*
1283          * Add an ethertype filter to drop all flow control frames transmitted
1284          * from VSIs. By doing so, we stop VFs from sending out PAUSE or PFC
1285          * frames to the wire.
1286          */
1287         i40e_add_tx_flow_control_drop_filter(pf);
1288
1289         /* Set the max frame size to 0x2600 (9728 bytes) by default,
1290          * in case other drivers changed the default value.
1291          */
1292         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1293
1294         /* initialize mirror rule list */
1295         TAILQ_INIT(&pf->mirror_list);
1296
1297         /* initialize Traffic Manager configuration */
1298         i40e_tm_conf_init(dev);
1299
1300         /* Initialize customized information */
1301         i40e_init_customized_info(pf);
1302
1303         ret = i40e_init_ethtype_filter_list(dev);
1304         if (ret < 0)
1305                 goto err_init_ethtype_filter_list;
1306         ret = i40e_init_tunnel_filter_list(dev);
1307         if (ret < 0)
1308                 goto err_init_tunnel_filter_list;
1309         ret = i40e_init_fdir_filter_list(dev);
1310         if (ret < 0)
1311                 goto err_init_fdir_filter_list;
1312
1313         /* initialize queue region configuration */
1314         i40e_init_queue_region_conf(dev);
1315
1316         return 0;
1317
1318 err_init_fdir_filter_list:
1319         rte_free(pf->tunnel.hash_table);
1320         rte_free(pf->tunnel.hash_map);
1321 err_init_tunnel_filter_list:
1322         rte_free(pf->ethertype.hash_table);
1323         rte_free(pf->ethertype.hash_map);
1324 err_init_ethtype_filter_list:
1325         rte_free(dev->data->mac_addrs);
1326 err_mac_alloc:
1327         i40e_vsi_release(pf->main_vsi);
1328 err_setup_pf_switch:
1329 err_get_mac_addr:
1330 err_configure_lan_hmc:
1331         (void)i40e_shutdown_lan_hmc(hw);
1332 err_init_lan_hmc:
1333         i40e_res_pool_destroy(&pf->msix_pool);
1334 err_msix_pool_init:
1335         i40e_res_pool_destroy(&pf->qp_pool);
1336 err_qp_pool_init:
1337 err_parameter_init:
1338 err_get_capabilities:
1339         (void)i40e_shutdown_adminq(hw);
1340
1341         return ret;
1342 }
1343
1344 static void
1345 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1346 {
1347         struct i40e_ethertype_filter *p_ethertype;
1348         struct i40e_ethertype_rule *ethertype_rule;
1349
1350         ethertype_rule = &pf->ethertype;
1351         /* Remove all ethertype filter rules and hash */
1352         if (ethertype_rule->hash_map)
1353                 rte_free(ethertype_rule->hash_map);
1354         if (ethertype_rule->hash_table)
1355                 rte_hash_free(ethertype_rule->hash_table);
1356
1357         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1358                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1359                              p_ethertype, rules);
1360                 rte_free(p_ethertype);
1361         }
1362 }
1363
1364 static void
1365 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1366 {
1367         struct i40e_tunnel_filter *p_tunnel;
1368         struct i40e_tunnel_rule *tunnel_rule;
1369
1370         tunnel_rule = &pf->tunnel;
1371         /* Remove all tunnel filter rules and hash tables */
1372         if (tunnel_rule->hash_map)
1373                 rte_free(tunnel_rule->hash_map);
1374         if (tunnel_rule->hash_table)
1375                 rte_hash_free(tunnel_rule->hash_table);
1376
1377         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1378                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1379                 rte_free(p_tunnel);
1380         }
1381 }
1382
1383 static void
1384 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1385 {
1386         struct i40e_fdir_filter *p_fdir;
1387         struct i40e_fdir_info *fdir_info;
1388
1389         fdir_info = &pf->fdir;
1390         /* Remove all flow director rules and hash */
1391         if (fdir_info->hash_map)
1392                 rte_free(fdir_info->hash_map);
1393         if (fdir_info->hash_table)
1394                 rte_hash_free(fdir_info->hash_table);
1395
1396         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1397                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1398                 rte_free(p_fdir);
1399         }
1400 }
1401
1402 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1403 {
1404         /*
1405          * Disable flexible payload by default
1406          * for the corresponding L2/L3/L4 layers.
1407          */
1408         I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1409         I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1410         I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1411 }
1412
1413 static int
1414 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1415 {
1416         struct i40e_pf *pf;
1417         struct rte_pci_device *pci_dev;
1418         struct rte_intr_handle *intr_handle;
1419         struct i40e_hw *hw;
1420         struct i40e_filter_control_settings settings;
1421         struct rte_flow *p_flow;
1422         int ret;
1423         uint8_t aq_fail = 0;
1424
1425         PMD_INIT_FUNC_TRACE();
1426
1427         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1428                 return 0;
1429
1430         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1431         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1432         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1433         intr_handle = &pci_dev->intr_handle;
1434
1435         if (hw->adapter_stopped == 0)
1436                 i40e_dev_close(dev);
1437
1438         dev->dev_ops = NULL;
1439         dev->rx_pkt_burst = NULL;
1440         dev->tx_pkt_burst = NULL;
1441
1442         /* Clear PXE mode */
1443         i40e_clear_pxe_mode(hw);
1444
1445         /* Unconfigure filter control */
1446         memset(&settings, 0, sizeof(settings));
1447         ret = i40e_set_filter_control(hw, &settings);
1448         if (ret)
1449                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1450                                         ret);
1451
1452         /* Disable flow control */
1453         hw->fc.requested_mode = I40E_FC_NONE;
1454         i40e_set_fc(hw, &aq_fail, TRUE);
1455
1456         /* uninitialize pf host driver */
1457         i40e_pf_host_uninit(dev);
1458
1459         rte_free(dev->data->mac_addrs);
1460         dev->data->mac_addrs = NULL;
1461
1462         /* disable uio intr before callback unregister */
1463         rte_intr_disable(intr_handle);
1464
1465         /* unregister callback func from eal lib */
1466         rte_intr_callback_unregister(intr_handle,
1467                                      i40e_dev_interrupt_handler, dev);
1468
1469         i40e_rm_ethtype_filter_list(pf);
1470         i40e_rm_tunnel_filter_list(pf);
1471         i40e_rm_fdir_filter_list(pf);
1472
1473         /* Remove all flows */
1474         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
1475                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
1476                 rte_free(p_flow);
1477         }
1478
1479         /* Remove all Traffic Manager configuration */
1480         i40e_tm_conf_uninit(dev);
1481
1482         return 0;
1483 }
1484
1485 static int
1486 i40e_dev_configure(struct rte_eth_dev *dev)
1487 {
1488         struct i40e_adapter *ad =
1489                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1490         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1491         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1492         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1493         int i, ret;
1494
1495         ret = i40e_dev_sync_phy_type(hw);
1496         if (ret)
1497                 return ret;
1498
1499         /* Initialize to TRUE. If any of the Rx queues doesn't meet the
1500          * bulk allocation or vector Rx preconditions, it will be reset.
1501          */
1502         ad->rx_bulk_alloc_allowed = true;
1503         ad->rx_vec_allowed = true;
1504         ad->tx_simple_allowed = true;
1505         ad->tx_vec_allowed = true;
1506
1507         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1508                 ret = i40e_fdir_setup(pf);
1509                 if (ret != I40E_SUCCESS) {
1510                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1511                         return -ENOTSUP;
1512                 }
1513                 ret = i40e_fdir_configure(dev);
1514                 if (ret < 0) {
1515                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1516                         goto err;
1517                 }
1518         } else
1519                 i40e_fdir_teardown(pf);
1520
1521         ret = i40e_dev_init_vlan(dev);
1522         if (ret < 0)
1523                 goto err;
1524
1525         /* VMDQ setup.
1526          *  VMDQ setting has to move out of i40e_pf_config_mq_rx(), as VMDQ
1527          *  and RSS have different configuration requirements.
1528          *  The general PMD call sequence is NIC init, configure,
1529          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up the
1530          *  VSI a specific queue belongs to when VMDQ is applicable, so VMDQ
1531          *  has to be set up before rx/tx_queue_setup(); this function is the
1532          *  right place for vmdq_setup.
1533          *  RSS setup needs the actual configured Rx queue number, which is
1534          *  only available after rx_queue_setup(), so dev_start() is the
1535          *  right place for RSS setup.
1536          */
1537         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1538                 ret = i40e_vmdq_setup(dev);
1539                 if (ret)
1540                         goto err;
1541         }
1542
1543         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1544                 ret = i40e_dcb_setup(dev);
1545                 if (ret) {
1546                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1547                         goto err_dcb;
1548                 }
1549         }
1550
1551         TAILQ_INIT(&pf->flow_list);
1552
1553         return 0;
1554
1555 err_dcb:
1556         /* release VMDq resources if they exist */
1557         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1558                 i40e_vsi_release(pf->vmdq[i].vsi);
1559                 pf->vmdq[i].vsi = NULL;
1560         }
1561         rte_free(pf->vmdq);
1562         pf->vmdq = NULL;
1563 err:
1564         /* release flow director resources if they exist */
1565         i40e_fdir_teardown(pf);
1566         return ret;
1567 }
1568
1569 void
1570 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1571 {
1572         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1573         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1574         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1575         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1576         uint16_t msix_vect = vsi->msix_intr;
1577         uint16_t i;
1578
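        /*
         * Writing 0 to QINT_TQCTL/QINT_RQCTL clears CAUSE_ENA, detaching
         * each queue from its interrupt vector before the linked-list
         * heads are reset below.
         */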
1579         for (i = 0; i < vsi->nb_qps; i++) {
1580                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1581                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1582                 rte_wmb();
1583         }
1584
1585         if (vsi->type != I40E_VSI_SRIOV) {
1586                 if (!rte_intr_allow_others(intr_handle)) {
1587                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1588                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1589                         I40E_WRITE_REG(hw,
1590                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1591                                        0);
1592                 } else {
1593                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1594                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1595                         I40E_WRITE_REG(hw,
1596                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1597                                                        msix_vect - 1), 0);
1598                 }
1599         } else {
1600                 uint32_t reg;
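                /* num_msix_vectors_vf includes irq0, so subtract it to
                 * index into this VF's VPINT_LNKLSTN range.
                 */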
1601                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1602                         vsi->user_param + (msix_vect - 1);
1603
1604                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1605                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1606         }
1607         I40E_WRITE_FLUSH(hw);
1608 }
1609
1610 static void
1611 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1612                        int base_queue, int nb_queue,
1613                        uint16_t itr_idx)
1614 {
1615         int i;
1616         uint32_t val;
1617         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1618
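        /*
         * The hardware chains the queues of a vector into a linked list:
         * each QINT_RQCTL entry names the next queue in NEXTQ_INDX, the
         * LNKLST register written afterwards points at the first queue,
         * and the final entry is terminated with NEXTQ_INDX_MASK.
         */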
1619         /* Bind all RX queues to allocated MSIX interrupt */
1620         for (i = 0; i < nb_queue; i++) {
1621                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1622                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
1623                         ((base_queue + i + 1) <<
1624                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1625                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1626                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1627
1628                 if (i == nb_queue - 1)
1629                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1630                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1631         }
1632
1633         /* Write first RX queue to Link list register as the head element */
1634         if (vsi->type != I40E_VSI_SRIOV) {
1635                 uint16_t interval =
1636                         i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1);
1637
1638                 if (msix_vect == I40E_MISC_VEC_ID) {
1639                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1640                                        (base_queue <<
1641                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1642                                        (0x0 <<
1643                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1644                         I40E_WRITE_REG(hw,
1645                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1646                                        interval);
1647                 } else {
1648                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1649                                        (base_queue <<
1650                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1651                                        (0x0 <<
1652                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1653                         I40E_WRITE_REG(hw,
1654                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1655                                                        msix_vect - 1),
1656                                        interval);
1657                 }
1658         } else {
1659                 uint32_t reg;
1660
1661                 if (msix_vect == I40E_MISC_VEC_ID) {
1662                         I40E_WRITE_REG(hw,
1663                                        I40E_VPINT_LNKLST0(vsi->user_param),
1664                                        (base_queue <<
1665                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1666                                        (0x0 <<
1667                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1668                 } else {
1669                         /* num_msix_vectors_vf includes irq0, so subtract it */
1670                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1671                                 vsi->user_param + (msix_vect - 1);
1672
1673                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1674                                        (base_queue <<
1675                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1676                                        (0x0 <<
1677                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1678                 }
1679         }
1680
1681         I40E_WRITE_FLUSH(hw);
1682 }
1683
1684 void
1685 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
1686 {
1687         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1688         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1689         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1690         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1691         uint16_t msix_vect = vsi->msix_intr;
1692         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1693         uint16_t queue_idx = 0;
1694         int record = 0;
1695         uint32_t val;
1696         int i;
1697
1698         for (i = 0; i < vsi->nb_qps; i++) {
1699                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1700                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1701         }
1702
1703         /* Disable auto-masking so the INTENA flag is not auto-cleared */
1704         val = I40E_READ_REG(hw, I40E_GLINT_CTL);
1705         val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
1706                 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
1707                 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
1708         I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
1709
1710         /* VF bind interrupt */
1711         if (vsi->type == I40E_VSI_SRIOV) {
1712                 __vsi_queues_bind_intr(vsi, msix_vect,
1713                                        vsi->base_queue, vsi->nb_qps,
1714                                        itr_idx);
1715                 return;
1716         }
1717
1718         /* PF & VMDq bind interrupt */
1719         if (rte_intr_dp_is_en(intr_handle)) {
1720                 if (vsi->type == I40E_VSI_MAIN) {
1721                         queue_idx = 0;
1722                         record = 1;
1723                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1724                         struct i40e_vsi *main_vsi =
1725                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1726                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1727                         record = 1;
1728                 }
1729         }
1730
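        /*
         * Map queues to vectors: with fewer vectors than queues, all
         * remaining queues share one vector; otherwise each queue gets
         * its own vector (the 1:1 mapping below).
         */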
1731         for (i = 0; i < vsi->nb_used_qps; i++) {
1732                 if (nb_msix <= 1) {
1733                         if (!rte_intr_allow_others(intr_handle))
1734                                 /* allow sharing MISC_VEC_ID */
1735                                 msix_vect = I40E_MISC_VEC_ID;
1736
1737                         /* not enough msix_vect, map all queues to one */
1738                         __vsi_queues_bind_intr(vsi, msix_vect,
1739                                                vsi->base_queue + i,
1740                                                vsi->nb_used_qps - i,
1741                                                itr_idx);
1742                         for (; !!record && i < vsi->nb_used_qps; i++)
1743                                 intr_handle->intr_vec[queue_idx + i] =
1744                                         msix_vect;
1745                         break;
1746                 }
1747                 /* 1:1 queue/msix_vect mapping */
1748                 __vsi_queues_bind_intr(vsi, msix_vect,
1749                                        vsi->base_queue + i, 1,
1750                                        itr_idx);
1751                 if (!!record)
1752                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1753
1754                 msix_vect++;
1755                 nb_msix--;
1756         }
1757 }
1758
1759 static void
1760 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1761 {
1762         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1763         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1764         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1765         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1766         uint16_t interval = i40e_calc_itr_interval(\
1767                 RTE_LIBRTE_I40E_ITR_INTERVAL, 1);
1768         uint16_t msix_intr, i;
1769
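        /*
         * Re-arm each vector: set INTENA, clear the pending-bit array
         * (CLEARPBA) and program the ITR throttling interval, which the
         * hardware counts in 2 usec units.
         */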
1770         if (rte_intr_allow_others(intr_handle))
1771                 for (i = 0; i < vsi->nb_msix; i++) {
1772                         msix_intr = vsi->msix_intr + i;
1773                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1774                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1775                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1776                                 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1777                                 (interval <<
1778                                  I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
1779                 }
1780         else
1781                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1782                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1783                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1784                                (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
1785                                (interval <<
1786                                 I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
1787
1788         I40E_WRITE_FLUSH(hw);
1789 }
1790
1791 static void
1792 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1793 {
1794         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1795         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1796         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1797         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1798         uint16_t msix_intr, i;
1799
1800         if (rte_intr_allow_others(intr_handle))
1801                 for (i = 0; i < vsi->nb_msix; i++) {
1802                         msix_intr = vsi->msix_intr + i;
1803                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1804                                        0);
1805                 }
1806         else
1807                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
1808
1809         I40E_WRITE_FLUSH(hw);
1810 }
1811
1812 static inline uint8_t
1813 i40e_parse_link_speeds(uint16_t link_speeds)
1814 {
1815         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1816
1817         if (link_speeds & ETH_LINK_SPEED_40G)
1818                 link_speed |= I40E_LINK_SPEED_40GB;
1819         if (link_speeds & ETH_LINK_SPEED_25G)
1820                 link_speed |= I40E_LINK_SPEED_25GB;
1821         if (link_speeds & ETH_LINK_SPEED_20G)
1822                 link_speed |= I40E_LINK_SPEED_20GB;
1823         if (link_speeds & ETH_LINK_SPEED_10G)
1824                 link_speed |= I40E_LINK_SPEED_10GB;
1825         if (link_speeds & ETH_LINK_SPEED_1G)
1826                 link_speed |= I40E_LINK_SPEED_1GB;
1827         if (link_speeds & ETH_LINK_SPEED_100M)
1828                 link_speed |= I40E_LINK_SPEED_100MB;
1829
1830         return link_speed;
1831 }
1832
1833 static int
1834 i40e_phy_conf_link(struct i40e_hw *hw,
1835                    uint8_t abilities,
1836                    uint8_t force_speed,
1837                    bool is_up)
1838 {
1839         enum i40e_status_code status;
1840         struct i40e_aq_get_phy_abilities_resp phy_ab;
1841         struct i40e_aq_set_phy_config phy_conf;
1842         enum i40e_aq_phy_type cnt;
1843         uint32_t phy_type_mask = 0;
1844
1845         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
1846                         I40E_AQ_PHY_FLAG_PAUSE_RX |
1847                         I40E_AQ_PHY_FLAG_LOW_POWER;
1849         const uint8_t advt = I40E_LINK_SPEED_40GB |
1850                         I40E_LINK_SPEED_25GB |
1851                         I40E_LINK_SPEED_10GB |
1852                         I40E_LINK_SPEED_1GB |
1853                         I40E_LINK_SPEED_100MB;
1854         int ret = -ENOTSUP;
1855
1857         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
1858                                               NULL);
1859         if (status)
1860                 return ret;
1861
1862         /* If link already up, no need to set up again */
1863         if (is_up && phy_ab.phy_type != 0)
1864                 return I40E_SUCCESS;
1865
1866         memset(&phy_conf, 0, sizeof(phy_conf));
1867
1868         /* bits 0-2 use the values from get_phy_abilities_resp */
1869         abilities &= ~mask;
1870         abilities |= phy_ab.abilities & mask;
1871
1872         /* update abilities and speed */
1873         if (abilities & I40E_AQ_PHY_AN_ENABLED)
1874                 phy_conf.link_speed = advt;
1875         else
1876                 phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
1877
1878         phy_conf.abilities = abilities;
1879
1882         /* To enable link, phy_type mask needs to include each type */
1883         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
1884                 phy_type_mask |= 1 << cnt;
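        /* 25G PHY variants are advertised separately via phy_type_ext */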
1885
1886         /* use get_phy_abilities_resp value for the rest */
1887         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
1888         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
1889                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
1890                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
1891         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
1892         phy_conf.eee_capability = phy_ab.eee_capability;
1893         phy_conf.eeer = phy_ab.eeer_val;
1894         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
1895
1896         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
1897                     phy_ab.abilities, phy_ab.link_speed);
1898         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
1899                     phy_conf.abilities, phy_conf.link_speed);
1900
1901         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
1902         if (status)
1903                 return ret;
1904
1905         return I40E_SUCCESS;
1906 }
1907
1908 static int
1909 i40e_apply_link_speed(struct rte_eth_dev *dev)
1910 {
1911         uint8_t speed;
1912         uint8_t abilities = 0;
1913         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1914         struct rte_eth_conf *conf = &dev->data->dev_conf;
1915
1916         speed = i40e_parse_link_speeds(conf->link_speeds);
1917         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1918         if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
1919                 abilities |= I40E_AQ_PHY_AN_ENABLED;
1920         abilities |= I40E_AQ_PHY_LINK_ENABLED;
1921
1922         return i40e_phy_conf_link(hw, abilities, speed, true);
1923 }
1924
1925 static int
1926 i40e_dev_start(struct rte_eth_dev *dev)
1927 {
1928         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1929         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1930         struct i40e_vsi *main_vsi = pf->main_vsi;
1931         int ret, i;
1932         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1933         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1934         uint32_t intr_vector = 0;
1935         struct i40e_vsi *vsi;
1936
1937         hw->adapter_stopped = 0;
1938
1939         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1940                 PMD_INIT_LOG(ERR,
1941                 "Invalid link_speeds for port %u, autonegotiation disabled",
1942                               dev->data->port_id);
1943                 return -EINVAL;
1944         }
1945
1946         rte_intr_disable(intr_handle);
1947
1948         if ((rte_intr_cap_multiple(intr_handle) ||
1949              !RTE_ETH_DEV_SRIOV(dev).active) &&
1950             dev->data->dev_conf.intr_conf.rxq != 0) {
1951                 intr_vector = dev->data->nb_rx_queues;
1952                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
1953                 if (ret)
1954                         return ret;
1955         }
1956
1957         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1958                 intr_handle->intr_vec =
1959                         rte_zmalloc("intr_vec",
1960                                     dev->data->nb_rx_queues * sizeof(int),
1961                                     0);
1962                 if (!intr_handle->intr_vec) {
1963                         PMD_INIT_LOG(ERR,
1964                                 "Failed to allocate %d rx_queues intr_vec",
1965                                 dev->data->nb_rx_queues);
1966                         return -ENOMEM;
1967                 }
1968         }
1969
1970         /* Initialize VSI */
1971         ret = i40e_dev_rxtx_init(pf);
1972         if (ret != I40E_SUCCESS) {
1973                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
1974                 goto err_up;
1975         }
1976
1977         /* Map queues with MSIX interrupt */
1978         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
1979                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
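        /* Rx queues not claimed by the VMDq VSIs belong to the main VSI */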
1980         i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
1981         i40e_vsi_enable_queues_intr(main_vsi);
1982
1983         /* Map VMDQ VSI queues with MSIX interrupt */
1984         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1985                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1986                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
1987                                           I40E_ITR_INDEX_DEFAULT);
1988                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
1989         }
1990
1991         /* enable FDIR MSIX interrupt */
1992         if (pf->fdir.fdir_vsi) {
1993                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
1994                                           I40E_ITR_INDEX_NONE);
1995                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
1996         }
1997
1998         /* Enable all queues which have been configured */
1999         ret = i40e_dev_switch_queues(pf, TRUE);
2000
2001         if (ret != I40E_SUCCESS) {
2002                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
2003                 goto err_up;
2004         }
2005
2006         /* Enable receiving broadcast packets */
2007         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2008         if (ret != I40E_SUCCESS)
2009                 PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
2010
2011         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2012                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2013                                                 true, NULL);
2014                 if (ret != I40E_SUCCESS)
2015                         PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
2016         }
2017
2018         /* Enable the VLAN promiscuous mode. */
2019         if (pf->vfs) {
2020                 for (i = 0; i < pf->vf_num; i++) {
2021                         vsi = pf->vfs[i].vsi;
2022                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2023                                                      true, NULL);
2024                 }
2025         }
2026
2027         /* Apply the configured loopback mode (none or PHY local) */
2028         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2029             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2030                 ret = i40e_diag_set_loopback(hw, dev->data->dev_conf.lpbk_mode);
2031                 if (ret != I40E_SUCCESS) {
2032                         PMD_DRV_LOG(ERR, "Failed to set loopback link");
2033                         goto err_up;
2034                 }
2035         }
2036
2037         /* Apply link configuration */
2038         if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
2039                                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
2040                                 ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
2041                                 ETH_LINK_SPEED_40G)) {
2042                 PMD_DRV_LOG(ERR, "Invalid link setting");
2043                 goto err_up;
2044         }
2045         ret = i40e_apply_link_speed(dev);
2046         if (ret != I40E_SUCCESS) {
2047                 PMD_DRV_LOG(ERR, "Failed to apply link setting");
2048                 goto err_up;
2049         }
2050
2051         if (!rte_intr_allow_others(intr_handle)) {
2052                 rte_intr_callback_unregister(intr_handle,
2053                                              i40e_dev_interrupt_handler,
2054                                              (void *)dev);
2055                 /* configure and enable device interrupt */
2056                 i40e_pf_config_irq0(hw, FALSE);
2057                 i40e_pf_enable_irq0(hw);
2058
2059                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2060                         PMD_INIT_LOG(INFO,
2061                                 "LSC not enabled: no interrupt multiplexing");
2062         } else {
2063                 ret = i40e_aq_set_phy_int_mask(hw,
2064                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2065                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2066                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2067                 if (ret != I40E_SUCCESS)
2068                         PMD_DRV_LOG(WARNING, "Failed to set phy mask");
2069
2070                 /* Call get_link_info aq command to enable/disable LSE */
2071                 i40e_dev_link_update(dev, 0);
2072         }
2073
2074         /* enable uio intr after callback register */
2075         rte_intr_enable(intr_handle);
2076
2077         i40e_filter_restore(pf);
2078
2079         if (pf->tm_conf.root && !pf->tm_conf.committed)
2080                 PMD_DRV_LOG(WARNING,
2081                             "please call hierarchy_commit() "
2082                             "before starting the port");
2083
2084         return I40E_SUCCESS;
2085
2086 err_up:
2087         i40e_dev_switch_queues(pf, FALSE);
2088         i40e_dev_clear_queues(dev);
2089
2090         return ret;
2091 }
2092
2093 static void
2094 i40e_dev_stop(struct rte_eth_dev *dev)
2095 {
2096         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2097         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2098         struct i40e_vsi *main_vsi = pf->main_vsi;
2099         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2100         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2101         int i;
2102
2103         if (hw->adapter_stopped == 1)
2104                 return;
2105         /* Disable all queues */
2106         i40e_dev_switch_queues(pf, FALSE);
2107
2108         /* un-map queues with interrupt registers */
2109         i40e_vsi_disable_queues_intr(main_vsi);
2110         i40e_vsi_queues_unbind_intr(main_vsi);
2111
2112         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2113                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2114                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2115         }
2116
2117         if (pf->fdir.fdir_vsi) {
2118                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2119                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2120         }
2121         /* Clear all queues and release memory */
2122         i40e_dev_clear_queues(dev);
2123
2124         /* Set link down */
2125         i40e_dev_set_link_down(dev);
2126
2127         if (!rte_intr_allow_others(intr_handle))
2128                 /* revert to the default handler */
2129                 rte_intr_callback_register(intr_handle,
2130                                            i40e_dev_interrupt_handler,
2131                                            (void *)dev);
2132
2133         /* Clean datapath event and queue/vec mapping */
2134         rte_intr_efd_disable(intr_handle);
2135         if (intr_handle->intr_vec) {
2136                 rte_free(intr_handle->intr_vec);
2137                 intr_handle->intr_vec = NULL;
2138         }
2139
2140         /* reset hierarchy commit */
2141         pf->tm_conf.committed = false;
2142
2143         /* Remove all the queue region configuration */
2144         i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
2145
2146         hw->adapter_stopped = 1;
2147 }
2148
2149 static void
2150 i40e_dev_close(struct rte_eth_dev *dev)
2151 {
2152         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2153         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2154         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2155         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2156         struct i40e_mirror_rule *p_mirror;
2157         uint32_t reg;
2158         int i;
2159         int ret;
2160
2161         PMD_INIT_FUNC_TRACE();
2162
2163         i40e_dev_stop(dev);
2164
2165         /* Remove all mirror rules */
2166         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2167                 ret = i40e_aq_del_mirror_rule(hw,
2168                                               pf->main_vsi->veb->seid,
2169                                               p_mirror->rule_type,
2170                                               p_mirror->entries,
2171                                               p_mirror->num_entries,
2172                                               p_mirror->id);
2173                 if (ret < 0)
2174                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2175                                     "status = %d, aq_err = %d.", ret,
2176                                     hw->aq.asq_last_status);
2177
2178                 /* remove mirror software resource anyway */
2179                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2180                 rte_free(p_mirror);
2181                 pf->nb_mirror_rule--;
2182         }
2183
2184         i40e_dev_free_queues(dev);
2185
2186         /* Disable interrupt */
2187         i40e_pf_disable_irq0(hw);
2188         rte_intr_disable(intr_handle);
2189
2190         /* shutdown and destroy the HMC */
2191         i40e_shutdown_lan_hmc(hw);
2192
2193         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2194                 i40e_vsi_release(pf->vmdq[i].vsi);
2195                 pf->vmdq[i].vsi = NULL;
2196         }
2197         rte_free(pf->vmdq);
2198         pf->vmdq = NULL;
2199
2200         /* release all the existing VSIs and VEBs */
2201         i40e_fdir_teardown(pf);
2202         i40e_vsi_release(pf->main_vsi);
2203
2204         /* shutdown the adminq */
2205         i40e_aq_queue_shutdown(hw, true);
2206         i40e_shutdown_adminq(hw);
2207
2208         i40e_res_pool_destroy(&pf->qp_pool);
2209         i40e_res_pool_destroy(&pf->msix_pool);
2210
2211         /* Disable flexible payload in global configuration */
2212         i40e_flex_payload_reg_set_default(hw);
2213
2214         /* force a PF reset to clean anything leftover */
2215         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2216         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2217                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2218         I40E_WRITE_FLUSH(hw);
2219 }
2220
2221 /*
2222  * Reset PF device only to re-initialize resources in PMD layer
2223  */
2224 static int
2225 i40e_dev_reset(struct rte_eth_dev *dev)
2226 {
2227         int ret;
2228
2229         /* When a DPDK PMD PF begins to reset the PF port, it should notify
2230          * all of its VFs so that they stay aligned with it. The notification
2231          * mechanism is PMD specific and, for the i40e PF, rather complex.
2232          * To avoid unexpected behavior in the VFs, resetting a PF with
2233          * SR-IOV active is currently not supported. It might be supported later.
2234          */
2235         if (dev->data->sriov.active)
2236                 return -ENOTSUP;
2237
2238         ret = eth_i40e_dev_uninit(dev);
2239         if (ret)
2240                 return ret;
2241
2242         ret = eth_i40e_dev_init(dev);
2243
2244         return ret;
2245 }
2246
2247 static void
2248 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2249 {
2250         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2251         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2252         struct i40e_vsi *vsi = pf->main_vsi;
2253         int status;
2254
2255         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2256                                                      true, NULL, true);
2257         if (status != I40E_SUCCESS)
2258                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2259
2260         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2261                                                         TRUE, NULL);
2262         if (status != I40E_SUCCESS)
2263                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2264
2265 }
2266
2267 static void
2268 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2269 {
2270         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2271         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2272         struct i40e_vsi *vsi = pf->main_vsi;
2273         int status;
2274
2275         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2276                                                      false, NULL, true);
2277         if (status != I40E_SUCCESS)
2278                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2279
2280         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2281                                                         false, NULL);
2282         if (status != I40E_SUCCESS)
2283                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2284 }
2285
2286 static void
2287 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2288 {
2289         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2290         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2291         struct i40e_vsi *vsi = pf->main_vsi;
2292         int ret;
2293
2294         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2295         if (ret != I40E_SUCCESS)
2296                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2297 }
2298
2299 static void
2300 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2301 {
2302         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2303         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2304         struct i40e_vsi *vsi = pf->main_vsi;
2305         int ret;
2306
2307         if (dev->data->promiscuous == 1)
2308                 return; /* must remain in all_multicast mode */
2309
2310         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2311                                 vsi->seid, FALSE, NULL);
2312         if (ret != I40E_SUCCESS)
2313                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2314 }
2315
2316 /*
2317  * Set device link up.
2318  */
2319 static int
2320 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2321 {
2322         /* re-apply link speed setting */
2323         return i40e_apply_link_speed(dev);
2324 }
2325
2326 /*
2327  * Set device link down.
2328  */
2329 static int
2330 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2331 {
2332         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2333         uint8_t abilities = 0;
2334         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2335
2336         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2337         return i40e_phy_conf_link(hw, abilities, speed, false);
2338 }
2339
2340 int
2341 i40e_dev_link_update(struct rte_eth_dev *dev,
2342                      int wait_to_complete)
2343 {
2344 #define CHECK_INTERVAL 100  /* 100ms */
2345 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
2346         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2347         struct i40e_link_status link_status;
2348         struct rte_eth_link link, old;
2349         int status;
2350         unsigned rep_cnt = MAX_REPEAT_TIME;
2351         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2352
2353         memset(&link, 0, sizeof(link));
2354         memset(&old, 0, sizeof(old));
2355         memset(&link_status, 0, sizeof(link_status));
2356         rte_i40e_dev_atomic_read_link_status(dev, &old);
2357
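        /*
         * Poll for the link to come up for at most
         * MAX_REPEAT_TIME * CHECK_INTERVAL ms when the caller asked to
         * wait for completion; otherwise read the status only once.
         */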
2358         do {
2359                 /* Get link status information from hardware */
2360                 status = i40e_aq_get_link_info(hw, enable_lse,
2361                                                 &link_status, NULL);
2362                 if (status != I40E_SUCCESS) {
2363                         link.link_speed = ETH_SPEED_NUM_100M;
2364                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2365                         PMD_DRV_LOG(ERR, "Failed to get link info");
2366                         goto out;
2367                 }
2368
2369                 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
2370                 if (!wait_to_complete || link.link_status)
2371                         break;
2372
2373                 rte_delay_ms(CHECK_INTERVAL);
2374         } while (--rep_cnt);
2375
2376         if (!link.link_status)
2377                 goto out;
2378
2379         /* i40e uses full duplex only */
2380         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2381
2382         /* Parse the link status */
2383         switch (link_status.link_speed) {
2384         case I40E_LINK_SPEED_100MB:
2385                 link.link_speed = ETH_SPEED_NUM_100M;
2386                 break;
2387         case I40E_LINK_SPEED_1GB:
2388                 link.link_speed = ETH_SPEED_NUM_1G;
2389                 break;
2390         case I40E_LINK_SPEED_10GB:
2391                 link.link_speed = ETH_SPEED_NUM_10G;
2392                 break;
2393         case I40E_LINK_SPEED_20GB:
2394                 link.link_speed = ETH_SPEED_NUM_20G;
2395                 break;
2396         case I40E_LINK_SPEED_25GB:
2397                 link.link_speed = ETH_SPEED_NUM_25G;
2398                 break;
2399         case I40E_LINK_SPEED_40GB:
2400                 link.link_speed = ETH_SPEED_NUM_40G;
2401                 break;
2402         default:
2403                 link.link_speed = ETH_SPEED_NUM_100M;
2404                 break;
2405         }
2406
2407         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2408                         ETH_LINK_SPEED_FIXED);
2409
2410 out:
2411         rte_i40e_dev_atomic_write_link_status(dev, &link);
2412         if (link.link_status == old.link_status)
2413                 return -1;
2414
2415         i40e_notify_all_vfs_link_status(dev);
2416
2417         return 0;
2418 }
2419
2420 /* Get all the statistics of a VSI */
2421 void
2422 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2423 {
2424         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2425         struct i40e_eth_stats *nes = &vsi->eth_stats;
2426         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2427         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
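        /* idx selects this VSI's block of GLV_* statistics registers;
         * i40e_stat_update_48() returns the delta against the saved
         * offset and handles rollover of the 48-bit hardware counters.
         */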
2428
2429         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2430                             vsi->offset_loaded, &oes->rx_bytes,
2431                             &nes->rx_bytes);
2432         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2433                             vsi->offset_loaded, &oes->rx_unicast,
2434                             &nes->rx_unicast);
2435         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2436                             vsi->offset_loaded, &oes->rx_multicast,
2437                             &nes->rx_multicast);
2438         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2439                             vsi->offset_loaded, &oes->rx_broadcast,
2440                             &nes->rx_broadcast);
2441         /* exclude CRC bytes */
2442         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2443                 nes->rx_broadcast) * ETHER_CRC_LEN;
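        /* (the hardware byte counters include the 4-byte Ethernet FCS) */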
2444
2445         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2446                             &oes->rx_discards, &nes->rx_discards);
2447         /* GLV_REPC not supported */
2448         /* GLV_RMPC not supported */
2449         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2450                             &oes->rx_unknown_protocol,
2451                             &nes->rx_unknown_protocol);
2452         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2453                             vsi->offset_loaded, &oes->tx_bytes,
2454                             &nes->tx_bytes);
2455         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2456                             vsi->offset_loaded, &oes->tx_unicast,
2457                             &nes->tx_unicast);
2458         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2459                             vsi->offset_loaded, &oes->tx_multicast,
2460                             &nes->tx_multicast);
2461         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2462                             vsi->offset_loaded,  &oes->tx_broadcast,
2463                             &nes->tx_broadcast);
2464         /* GLV_TDPC not supported */
2465         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2466                             &oes->tx_errors, &nes->tx_errors);
2467         vsi->offset_loaded = true;
2468
2469         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2470                     vsi->vsi_id);
2471         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2472         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2473         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2474         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2475         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2476         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2477                     nes->rx_unknown_protocol);
2478         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2479         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2480         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2481         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2482         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2483         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2484         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2485                     vsi->vsi_id);
2486 }
2487
2488 static void
2489 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2490 {
2491         unsigned int i;
2492         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2493         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2494
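        /*
         * The "internal" counters below track packets switched locally
         * between VSIs inside the device (never reaching the wire); they
         * are subtracted from the port-level GLPRT_* counters further
         * down so that only wire traffic is reported.
         */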
2495         /* Get rx/tx bytes of internal transfer packets */
2496         i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2497                         I40E_GLV_GORCL(hw->port),
2498                         pf->offset_loaded,
2499                         &pf->internal_stats_offset.rx_bytes,
2500                         &pf->internal_stats.rx_bytes);
2501
2502         i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2503                         I40E_GLV_GOTCL(hw->port),
2504                         pf->offset_loaded,
2505                         &pf->internal_stats_offset.tx_bytes,
2506                         &pf->internal_stats.tx_bytes);
2507         /* Get total internal rx packet count */
2508         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
2509                             I40E_GLV_UPRCL(hw->port),
2510                             pf->offset_loaded,
2511                             &pf->internal_stats_offset.rx_unicast,
2512                             &pf->internal_stats.rx_unicast);
2513         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
2514                             I40E_GLV_MPRCL(hw->port),
2515                             pf->offset_loaded,
2516                             &pf->internal_stats_offset.rx_multicast,
2517                             &pf->internal_stats.rx_multicast);
2518         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
2519                             I40E_GLV_BPRCL(hw->port),
2520                             pf->offset_loaded,
2521                             &pf->internal_stats_offset.rx_broadcast,
2522                             &pf->internal_stats.rx_broadcast);
2523         /* Get total internal tx packet count */
2524         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
2525                             I40E_GLV_UPTCL(hw->port),
2526                             pf->offset_loaded,
2527                             &pf->internal_stats_offset.tx_unicast,
2528                             &pf->internal_stats.tx_unicast);
2529         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
2530                             I40E_GLV_MPTCL(hw->port),
2531                             pf->offset_loaded,
2532                             &pf->internal_stats_offset.tx_multicast,
2533                             &pf->internal_stats.tx_multicast);
2534         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
2535                             I40E_GLV_BPTCL(hw->port),
2536                             pf->offset_loaded,
2537                             &pf->internal_stats_offset.tx_broadcast,
2538                             &pf->internal_stats.tx_broadcast);
2539
2540         /* exclude CRC size */
2541         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
2542                 pf->internal_stats.rx_multicast +
2543                 pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
2544
2545         /* Get statistics of struct i40e_eth_stats */
2546         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2547                             I40E_GLPRT_GORCL(hw->port),
2548                             pf->offset_loaded, &os->eth.rx_bytes,
2549                             &ns->eth.rx_bytes);
2550         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2551                             I40E_GLPRT_UPRCL(hw->port),
2552                             pf->offset_loaded, &os->eth.rx_unicast,
2553                             &ns->eth.rx_unicast);
2554         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2555                             I40E_GLPRT_MPRCL(hw->port),
2556                             pf->offset_loaded, &os->eth.rx_multicast,
2557                             &ns->eth.rx_multicast);
2558         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2559                             I40E_GLPRT_BPRCL(hw->port),
2560                             pf->offset_loaded, &os->eth.rx_broadcast,
2561                             &ns->eth.rx_broadcast);
2562         /* Workaround: CRC size should not be included in byte statistics,
2563          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2564          */
2565         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2566                 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
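        /* Worked example of the CRC exclusion above: with 1000 unicast,
         * 10 multicast and 1 broadcast packets received, the byte counter
         * is reduced by 1011 * ETHER_CRC_LEN = 1011 * 4 = 4044 bytes,
         * since the hardware counts the 4-byte CRC in the byte registers
         * but the reported statistics should not include it.
         */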
2567
2568         /* Exclude internal rx bytes.
2569          * Workaround: it is possible that I40E_GLV_GORC[H/L] is updated
2570          * before I40E_GLPRT_GORC[H/L], so there is a small window that
2571          * causes a negative value.
2572          * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L], I40E_GLV_BPRC[H/L].
2573          */
2574         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
2575                 ns->eth.rx_bytes = 0;
2576         else
2577                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
2578
2579         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
2580                 ns->eth.rx_unicast = 0;
2581         else
2582                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
2583
2584         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
2585                 ns->eth.rx_multicast = 0;
2586         else
2587                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
2588
2589         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
2590                 ns->eth.rx_broadcast = 0;
2591         else
2592                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
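        /* The clamp-on-underflow pattern above could be factored into a
         * helper; a minimal sketch (illustrative only, not part of the
         * driver):
         *
         *   static inline uint64_t
         *   i40e_sub_clamp(uint64_t total, uint64_t internal)
         *   {
         *           return total < internal ? 0 : total - internal;
         *   }
         *
         *   ns->eth.rx_bytes = i40e_sub_clamp(ns->eth.rx_bytes,
         *                                     pf->internal_stats.rx_bytes);
         */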
2593
2594         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2595                             pf->offset_loaded, &os->eth.rx_discards,
2596                             &ns->eth.rx_discards);
2597         /* GLPRT_REPC not supported */
2598         /* GLPRT_RMPC not supported */
2599         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2600                             pf->offset_loaded,
2601                             &os->eth.rx_unknown_protocol,
2602                             &ns->eth.rx_unknown_protocol);
2603         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2604                             I40E_GLPRT_GOTCL(hw->port),
2605                             pf->offset_loaded, &os->eth.tx_bytes,
2606                             &ns->eth.tx_bytes);
2607         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2608                             I40E_GLPRT_UPTCL(hw->port),
2609                             pf->offset_loaded, &os->eth.tx_unicast,
2610                             &ns->eth.tx_unicast);
2611         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2612                             I40E_GLPRT_MPTCL(hw->port),
2613                             pf->offset_loaded, &os->eth.tx_multicast,
2614                             &ns->eth.tx_multicast);
2615         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2616                             I40E_GLPRT_BPTCL(hw->port),
2617                             pf->offset_loaded, &os->eth.tx_broadcast,
2618                             &ns->eth.tx_broadcast);
2619         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2620                 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2621
2622         /* Exclude internal tx bytes.
2623          * Workaround: it is possible that I40E_GLV_GOTC[H/L] is updated
2624          * before I40E_GLPRT_GOTC[H/L], so there is a small window that
2625          * causes a negative value.
2626          * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L], I40E_GLV_BPTC[H/L].
2627          */
2628         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
2629                 ns->eth.tx_bytes = 0;
2630         else
2631                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
2632
2633         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
2634                 ns->eth.tx_unicast = 0;
2635         else
2636                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
2637
2638         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
2639                 ns->eth.tx_multicast = 0;
2640         else
2641                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
2642
2643         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
2644                 ns->eth.tx_broadcast = 0;
2645         else
2646                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
2647
2648         /* GLPRT_TEPC not supported */
2649
2650         /* additional port specific stats */
2651         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2652                             pf->offset_loaded, &os->tx_dropped_link_down,
2653                             &ns->tx_dropped_link_down);
2654         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2655                             pf->offset_loaded, &os->crc_errors,
2656                             &ns->crc_errors);
2657         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2658                             pf->offset_loaded, &os->illegal_bytes,
2659                             &ns->illegal_bytes);
2660         /* GLPRT_ERRBC not supported */
2661         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2662                             pf->offset_loaded, &os->mac_local_faults,
2663                             &ns->mac_local_faults);
2664         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2665                             pf->offset_loaded, &os->mac_remote_faults,
2666                             &ns->mac_remote_faults);
2667         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2668                             pf->offset_loaded, &os->rx_length_errors,
2669                             &ns->rx_length_errors);
2670         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2671                             pf->offset_loaded, &os->link_xon_rx,
2672                             &ns->link_xon_rx);
2673         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2674                             pf->offset_loaded, &os->link_xoff_rx,
2675                             &ns->link_xoff_rx);
2676         for (i = 0; i < 8; i++) {
2677                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2678                                     pf->offset_loaded,
2679                                     &os->priority_xon_rx[i],
2680                                     &ns->priority_xon_rx[i]);
2681                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2682                                     pf->offset_loaded,
2683                                     &os->priority_xoff_rx[i],
2684                                     &ns->priority_xoff_rx[i]);
2685         }
2686         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2687                             pf->offset_loaded, &os->link_xon_tx,
2688                             &ns->link_xon_tx);
2689         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2690                             pf->offset_loaded, &os->link_xoff_tx,
2691                             &ns->link_xoff_tx);
2692         for (i = 0; i < 8; i++) {
2693                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2694                                     pf->offset_loaded,
2695                                     &os->priority_xon_tx[i],
2696                                     &ns->priority_xon_tx[i]);
2697                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2698                                     pf->offset_loaded,
2699                                     &os->priority_xoff_tx[i],
2700                                     &ns->priority_xoff_tx[i]);
2701                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2702                                     pf->offset_loaded,
2703                                     &os->priority_xon_2_xoff[i],
2704                                     &ns->priority_xon_2_xoff[i]);
2705         }
2706         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2707                             I40E_GLPRT_PRC64L(hw->port),
2708                             pf->offset_loaded, &os->rx_size_64,
2709                             &ns->rx_size_64);
2710         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2711                             I40E_GLPRT_PRC127L(hw->port),
2712                             pf->offset_loaded, &os->rx_size_127,
2713                             &ns->rx_size_127);
2714         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2715                             I40E_GLPRT_PRC255L(hw->port),
2716                             pf->offset_loaded, &os->rx_size_255,
2717                             &ns->rx_size_255);
2718         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2719                             I40E_GLPRT_PRC511L(hw->port),
2720                             pf->offset_loaded, &os->rx_size_511,
2721                             &ns->rx_size_511);
2722         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2723                             I40E_GLPRT_PRC1023L(hw->port),
2724                             pf->offset_loaded, &os->rx_size_1023,
2725                             &ns->rx_size_1023);
2726         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2727                             I40E_GLPRT_PRC1522L(hw->port),
2728                             pf->offset_loaded, &os->rx_size_1522,
2729                             &ns->rx_size_1522);
2730         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2731                             I40E_GLPRT_PRC9522L(hw->port),
2732                             pf->offset_loaded, &os->rx_size_big,
2733                             &ns->rx_size_big);
2734         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2735                             pf->offset_loaded, &os->rx_undersize,
2736                             &ns->rx_undersize);
2737         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2738                             pf->offset_loaded, &os->rx_fragments,
2739                             &ns->rx_fragments);
2740         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2741                             pf->offset_loaded, &os->rx_oversize,
2742                             &ns->rx_oversize);
2743         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2744                             pf->offset_loaded, &os->rx_jabber,
2745                             &ns->rx_jabber);
2746         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2747                             I40E_GLPRT_PTC64L(hw->port),
2748                             pf->offset_loaded, &os->tx_size_64,
2749                             &ns->tx_size_64);
2750         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2751                             I40E_GLPRT_PTC127L(hw->port),
2752                             pf->offset_loaded, &os->tx_size_127,
2753                             &ns->tx_size_127);
2754         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2755                             I40E_GLPRT_PTC255L(hw->port),
2756                             pf->offset_loaded, &os->tx_size_255,
2757                             &ns->tx_size_255);
2758         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2759                             I40E_GLPRT_PTC511L(hw->port),
2760                             pf->offset_loaded, &os->tx_size_511,
2761                             &ns->tx_size_511);
2762         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2763                             I40E_GLPRT_PTC1023L(hw->port),
2764                             pf->offset_loaded, &os->tx_size_1023,
2765                             &ns->tx_size_1023);
2766         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2767                             I40E_GLPRT_PTC1522L(hw->port),
2768                             pf->offset_loaded, &os->tx_size_1522,
2769                             &ns->tx_size_1522);
2770         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2771                             I40E_GLPRT_PTC9522L(hw->port),
2772                             pf->offset_loaded, &os->tx_size_big,
2773                             &ns->tx_size_big);
2774         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2775                            pf->offset_loaded,
2776                            &os->fd_sb_match, &ns->fd_sb_match);
2777         /* GLPRT_MSPDC not supported */
2778         /* GLPRT_XEC not supported */
2779
2780         pf->offset_loaded = true;
2781
2782         if (pf->main_vsi)
2783                 i40e_update_vsi_stats(pf->main_vsi);
2784 }
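
/* Accumulation model used by i40e_stat_update_32/48 above, in rough
 * pseudo-C (illustrative; the real helpers also handle 48-bit wrap):
 *
 *   new = read_register(hw, reg);
 *   if (!offset_loaded)
 *           *offset = new;           // first read seeds the baseline
 *   *stat = new - *offset;           // stats report the delta since reset
 *
 * Clearing pf->offset_loaded therefore "resets" the counters on the
 * next read, as done in i40e_dev_stats_reset() below.
 */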
2785
2786 /* Get all statistics of a port */
2787 static int
2788 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2789 {
2790         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2791         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2792         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2793         unsigned int i;
2794
2795         /* Read the hardware registers to refresh the values in the stats struct */
2796         i40e_read_stats_registers(pf, hw);
2797
2798         stats->ipackets = ns->eth.rx_unicast +
2799                         ns->eth.rx_multicast +
2800                         ns->eth.rx_broadcast -
2801                         ns->eth.rx_discards -
2802                         pf->main_vsi->eth_stats.rx_discards;
2803         stats->opackets = ns->eth.tx_unicast +
2804                         ns->eth.tx_multicast +
2805                         ns->eth.tx_broadcast;
2806         stats->ibytes   = ns->eth.rx_bytes;
2807         stats->obytes   = ns->eth.tx_bytes;
2808         stats->oerrors  = ns->eth.tx_errors +
2809                         pf->main_vsi->eth_stats.tx_errors;
2810
2811         /* Rx Errors */
2812         stats->imissed  = ns->eth.rx_discards +
2813                         pf->main_vsi->eth_stats.rx_discards;
2814         stats->ierrors  = ns->crc_errors +
2815                         ns->rx_length_errors + ns->rx_undersize +
2816                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
2817
2818         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2819         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
2820         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
2821         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
2822         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
2823         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
2824         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2825                     ns->eth.rx_unknown_protocol);
2826         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
2827         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
2828         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
2829         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
2830         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
2831         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
2832
2833         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
2834                     ns->tx_dropped_link_down);
2835         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
2836         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
2837                     ns->illegal_bytes);
2838         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
2839         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
2840                     ns->mac_local_faults);
2841         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
2842                     ns->mac_remote_faults);
2843         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
2844                     ns->rx_length_errors);
2845         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
2846         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
2847         for (i = 0; i < 8; i++) {
2848                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
2849                                 i, ns->priority_xon_rx[i]);
2850                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
2851                                 i, ns->priority_xoff_rx[i]);
2852         }
2853         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
2854         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
2855         for (i = 0; i < 8; i++) {
2856                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
2857                                 i, ns->priority_xon_tx[i]);
2858                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
2859                                 i, ns->priority_xoff_tx[i]);
2860                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
2861                                 i, ns->priority_xon_2_xoff[i]);
2862         }
2863         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
2864         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
2865         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
2866         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
2867         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
2868         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
2869         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
2870         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
2871         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
2872         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
2873         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
2874         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
2875         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
2876         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
2877         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
2878         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
2879         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
2880         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
2881         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
2882                         ns->mac_short_packet_dropped);
2883         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
2884                     ns->checksum_error);
2885         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
2886         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
2887         return 0;
2888 }
2889
2890 /* Reset the statistics */
2891 static void
2892 i40e_dev_stats_reset(struct rte_eth_dev *dev)
2893 {
2894         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2895         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2896
2897         /* Mark PF and VSI stats to update the offset, aka "reset" */
2898         pf->offset_loaded = false;
2899         if (pf->main_vsi)
2900                 pf->main_vsi->offset_loaded = false;
2901
2902         /* Read the stats; this loads the current register values into the offsets */
2903         i40e_read_stats_registers(pf, hw);
2904 }
2905
2906 static uint32_t
2907 i40e_xstats_calc_num(void)
2908 {
2909         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
2910                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
2911                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
2912 }
2913
2914 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
2915                                      struct rte_eth_xstat_name *xstats_names,
2916                                      __rte_unused unsigned int limit)
2917 {
2918         unsigned int count = 0;
2919         unsigned int i, prio;
2920
2921         if (xstats_names == NULL)
2922                 return i40e_xstats_calc_num();
2923
2924         /* Note: limit checked in rte_eth_xstats_get_names() */
2925
2926         /* Get stats from i40e_eth_stats struct */
2927         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2928                 snprintf(xstats_names[count].name,
2929                          sizeof(xstats_names[count].name),
2930                          "%s", rte_i40e_stats_strings[i].name);
2931                 count++;
2932         }
2933
2934         /* Get individual stats from i40e_hw_port struct */
2935         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2936                 snprintf(xstats_names[count].name,
2937                         sizeof(xstats_names[count].name),
2938                          "%s", rte_i40e_hw_port_strings[i].name);
2939                 count++;
2940         }
2941
2942         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2943                 for (prio = 0; prio < 8; prio++) {
2944                         snprintf(xstats_names[count].name,
2945                                  sizeof(xstats_names[count].name),
2946                                  "rx_priority%u_%s", prio,
2947                                  rte_i40e_rxq_prio_strings[i].name);
2948                         count++;
2949                 }
2950         }
2951
2952         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2953                 for (prio = 0; prio < 8; prio++) {
2954                         snprintf(xstats_names[count].name,
2955                                  sizeof(xstats_names[count].name),
2956                                  "tx_priority%u_%s", prio,
2957                                  rte_i40e_txq_prio_strings[i].name);
2958                         count++;
2959                 }
2960         }
2961         return count;
2962 }
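
/* Typical application-side use of the names/values callbacks above
 * (illustrative sketch; error handling elided, assumes the ethdev API
 * of this DPDK version):
 *
 *   int n = rte_eth_xstats_get_names(port_id, NULL, 0);   // query count
 *   struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *   struct rte_eth_xstat *vals = malloc(n * sizeof(*vals));
 *   rte_eth_xstats_get_names(port_id, names, n);          // fill names
 *   rte_eth_xstats_get(port_id, vals, n);                 // fill values
 */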
2963
2964 static int
2965 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2966                     unsigned int n)
2967 {
2968         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2969         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2970         unsigned int i, count, prio;
2971         struct i40e_hw_port_stats *hw_stats = &pf->stats;
2972
2973         count = i40e_xstats_calc_num();
2974         if (n < count)
2975                 return count;
2976
2977         i40e_read_stats_registers(pf, hw);
2978
2979         if (xstats == NULL)
2980                 return 0;
2981
2982         count = 0;
2983
2984         /* Get stats from i40e_eth_stats struct */
2985         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2986                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
2987                         rte_i40e_stats_strings[i].offset);
2988                 xstats[count].id = count;
2989                 count++;
2990         }
2991
2992         /* Get individual stats from i40e_hw_port struct */
2993         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2994                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2995                         rte_i40e_hw_port_strings[i].offset);
2996                 xstats[count].id = count;
2997                 count++;
2998         }
2999
3000         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3001                 for (prio = 0; prio < 8; prio++) {
3002                         xstats[count].value =
3003                                 *(uint64_t *)(((char *)hw_stats) +
3004                                 rte_i40e_rxq_prio_strings[i].offset +
3005                                 (sizeof(uint64_t) * prio));
3006                         xstats[count].id = count;
3007                         count++;
3008                 }
3009         }
3010
3011         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3012                 for (prio = 0; prio < 8; prio++) {
3013                         xstats[count].value =
3014                                 *(uint64_t *)(((char *)hw_stats) +
3015                                 rte_i40e_txq_prio_strings[i].offset +
3016                                 (sizeof(uint64_t) * prio));
3017                         xstats[count].id = count;
3018                         count++;
3019                 }
3020         }
3021
3022         return count;
3023 }
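
/* The generic field extraction above relies on name/offset tables of
 * the form (illustrative; the real tables are rte_i40e_stats_strings
 * and friends in this file):
 *
 *   { "rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast) }
 *
 * so each value is fetched as
 *   *(uint64_t *)((char *)base + entry.offset);
 */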
3024
3025 static int
3026 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
3027                                  __rte_unused uint16_t queue_id,
3028                                  __rte_unused uint8_t stat_idx,
3029                                  __rte_unused uint8_t is_rx)
3030 {
3031         PMD_INIT_FUNC_TRACE();
3032
3033         return -ENOSYS;
3034 }
3035
3036 static int
3037 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3038 {
3039         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3040         u32 full_ver;
3041         u8 ver, patch;
3042         u16 build;
3043         int ret;
3044
3045         full_ver = hw->nvm.oem_ver;
3046         ver = (u8)(full_ver >> 24);
3047         build = (u16)((full_ver >> 8) & 0xffff);
3048         patch = (u8)(full_ver & 0xff);
3049
3050         ret = snprintf(fw_version, fw_size,
3051                  "%d.%d%d 0x%08x %d.%d.%d",
3052                  ((hw->nvm.version >> 12) & 0xf),
3053                  ((hw->nvm.version >> 4) & 0xff),
3054                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3055                  ver, build, patch);
3056
3057         ret += 1; /* add the size of '\0' */
3058         if (fw_size < (u32)ret)
3059                 return ret;
3060         else
3061                 return 0;
3062 }
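
/* Worked example of the OEM version decoding above: for
 * hw->nvm.oem_ver == 0x060000FF, ver = 0x06, build = 0x0000 and
 * patch = 0xFF, so the last field of the version string is "6.0.255".
 */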
3063
3064 static void
3065 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3066 {
3067         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3068         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3069         struct i40e_vsi *vsi = pf->main_vsi;
3070         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3071
3072         dev_info->pci_dev = pci_dev;
3073         dev_info->max_rx_queues = vsi->nb_qps;
3074         dev_info->max_tx_queues = vsi->nb_qps;
3075         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3076         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3077         dev_info->max_mac_addrs = vsi->max_macaddrs;
3078         dev_info->max_vfs = pci_dev->max_vfs;
3079         dev_info->rx_offload_capa =
3080                 DEV_RX_OFFLOAD_VLAN_STRIP |
3081                 DEV_RX_OFFLOAD_QINQ_STRIP |
3082                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3083                 DEV_RX_OFFLOAD_UDP_CKSUM |
3084                 DEV_RX_OFFLOAD_TCP_CKSUM;
3085         dev_info->tx_offload_capa =
3086                 DEV_TX_OFFLOAD_VLAN_INSERT |
3087                 DEV_TX_OFFLOAD_QINQ_INSERT |
3088                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3089                 DEV_TX_OFFLOAD_UDP_CKSUM |
3090                 DEV_TX_OFFLOAD_TCP_CKSUM |
3091                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3092                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3093                 DEV_TX_OFFLOAD_TCP_TSO |
3094                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3095                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3096                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3097                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
3098         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3099                                                 sizeof(uint32_t);
3100         dev_info->reta_size = pf->hash_lut_size;
3101         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3102
3103         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3104                 .rx_thresh = {
3105                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3106                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3107                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3108                 },
3109                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3110                 .rx_drop_en = 0,
3111         };
3112
3113         dev_info->default_txconf = (struct rte_eth_txconf) {
3114                 .tx_thresh = {
3115                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3116                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3117                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3118                 },
3119                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3120                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3121                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3122                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3123         };
3124
3125         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3126                 .nb_max = I40E_MAX_RING_DESC,
3127                 .nb_min = I40E_MIN_RING_DESC,
3128                 .nb_align = I40E_ALIGN_RING_DESC,
3129         };
3130
3131         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3132                 .nb_max = I40E_MAX_RING_DESC,
3133                 .nb_min = I40E_MIN_RING_DESC,
3134                 .nb_align = I40E_ALIGN_RING_DESC,
3135                 .nb_seg_max = I40E_TX_MAX_SEG,
3136                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3137         };
3138
3139         if (pf->flags & I40E_FLAG_VMDQ) {
3140                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3141                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3142                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3143                                                 pf->max_nb_vmdq_vsi;
3144                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3145                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3146                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3147         }
3148
3149         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
3150                 /* For XL710 */
3151                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3152         else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
3153                 /* For XXV710 */
3154                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3155         else
3156                 /* For X710 */
3157                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3158 }
3159
3160 static int
3161 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3162 {
3163         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3164         struct i40e_vsi *vsi = pf->main_vsi;
3165         PMD_INIT_FUNC_TRACE();
3166
3167         if (on)
3168                 return i40e_vsi_add_vlan(vsi, vlan_id);
3169         else
3170                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3171 }
3172
3173 static int
3174 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3175                                 enum rte_vlan_type vlan_type,
3176                                 uint16_t tpid, int qinq)
3177 {
3178         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3179         uint64_t reg_r = 0;
3180         uint64_t reg_w = 0;
3181         uint16_t reg_id = 3;
3182         int ret;
3183
3184         if (qinq) {
3185                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3186                         reg_id = 2;
3187         }
3188
3189         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3190                                           &reg_r, NULL);
3191         if (ret != I40E_SUCCESS) {
3192                 PMD_DRV_LOG(ERR,
3193                            "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3194                            reg_id);
3195                 return -EIO;
3196         }
3197         PMD_DRV_LOG(DEBUG,
3198                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3199                     reg_id, reg_r);
3200
3201         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3202         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3203         if (reg_r == reg_w) {
3204                 PMD_DRV_LOG(DEBUG, "No need to write");
3205                 return 0;
3206         }
3207
3208         ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3209                                            reg_w, NULL);
3210         if (ret != I40E_SUCCESS) {
3211                 PMD_DRV_LOG(ERR,
3212                             "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3213                             reg_id);
3214                 return -EIO;
3215         }
3216         PMD_DRV_LOG(DEBUG,
3217                     "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
3218                     reg_w, reg_id);
3219
3220         return 0;
3221 }
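
/* The update above is a read-modify-write of the ETHERTYPE field only;
 * e.g. (illustrative) for the 802.1ad TPID 0x88A8:
 *
 *   reg_w = (reg_r & ~I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK) |
 *           ((uint64_t)0x88A8 << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
 */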
3222
3223 static int
3224 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3225                    enum rte_vlan_type vlan_type,
3226                    uint16_t tpid)
3227 {
3228         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3229         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
3230         int ret = 0;
3231
3232         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3233              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3234             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3235                 PMD_DRV_LOG(ERR,
3236                             "Unsupported vlan type.");
3237                 return -EINVAL;
3238         }
3239         /* 802.1ad frame support was added in NVM API 1.7 */
3240         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3241                 if (qinq) {
3242                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3243                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3244                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3245                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3246                 } else {
3247                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3248                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3249                 }
3250                 ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
3251                 if (ret != I40E_SUCCESS) {
3252                         PMD_DRV_LOG(ERR,
3253                                     "Set switch config failed aq_err: %d",
3254                                     hw->aq.asq_last_status);
3255                         ret = -EIO;
3256                 }
3257         } else
3258                 /* If NVM API < 1.7, keep the register setting */
3259                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3260                                                       tpid, qinq);
3261
3262         return ret;
3263 }
3264
3265 static int
3266 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3267 {
3268         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3269         struct i40e_vsi *vsi = pf->main_vsi;
3270
3271         if (mask & ETH_VLAN_FILTER_MASK) {
3272                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3273                         i40e_vsi_config_vlan_filter(vsi, TRUE);
3274                 else
3275                         i40e_vsi_config_vlan_filter(vsi, FALSE);
3276         }
3277
3278         if (mask & ETH_VLAN_STRIP_MASK) {
3279                 /* Enable or disable VLAN stripping */
3280                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
3281                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
3282                 else
3283                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
3284         }
3285
3286         if (mask & ETH_VLAN_EXTEND_MASK) {
3287                 if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
3288                         i40e_vsi_config_double_vlan(vsi, TRUE);
3289                         /* Set global registers with default ethertype. */
3290                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3291                                            ETHER_TYPE_VLAN);
3292                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3293                                            ETHER_TYPE_VLAN);
3294                 } else
3296                         i40e_vsi_config_double_vlan(vsi, FALSE);
3297         }
3298
3299         return 0;
3300 }
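
/* Application-side trigger for this callback (illustrative sketch;
 * the ethdev layer computes the changed-bits mask passed in here):
 *
 *   rte_eth_dev_set_vlan_offload(port_id,
 *           ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
 */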
3301
3302 static void
3303 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3304                           __rte_unused uint16_t queue,
3305                           __rte_unused int on)
3306 {
3307         PMD_INIT_FUNC_TRACE();
3308 }
3309
3310 static int
3311 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3312 {
3313         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3314         struct i40e_vsi *vsi = pf->main_vsi;
3315         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3316         struct i40e_vsi_vlan_pvid_info info;
3317
3318         memset(&info, 0, sizeof(info));
3319         info.on = on;
3320         if (info.on)
3321                 info.config.pvid = pvid;
3322         else {
3323                 info.config.reject.tagged =
3324                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
3325                 info.config.reject.untagged =
3326                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
3327         }
3328
3329         return i40e_vsi_vlan_pvid_set(vsi, &info);
3330 }
3331
3332 static int
3333 i40e_dev_led_on(struct rte_eth_dev *dev)
3334 {
3335         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3336         uint32_t mode = i40e_led_get(hw);
3337
3338         if (mode == 0)
3339                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
3340
3341         return 0;
3342 }
3343
3344 static int
3345 i40e_dev_led_off(struct rte_eth_dev *dev)
3346 {
3347         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3348         uint32_t mode = i40e_led_get(hw);
3349
3350         if (mode != 0)
3351                 i40e_led_set(hw, 0, false);
3352
3353         return 0;
3354 }
3355
3356 static int
3357 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3358 {
3359         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3360         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3361
3362         fc_conf->pause_time = pf->fc_conf.pause_time;
3363
3364         /* Read from the registers, in case they were modified by another port */
3365         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
3366                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
3367         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
3368                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
3369
3370         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3371         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3372
3373         /* Return the current mode according to the actual setting */
3374         switch (hw->fc.current_mode) {
3375         case I40E_FC_FULL:
3376                 fc_conf->mode = RTE_FC_FULL;
3377                 break;
3378         case I40E_FC_TX_PAUSE:
3379                 fc_conf->mode = RTE_FC_TX_PAUSE;
3380                 break;
3381         case I40E_FC_RX_PAUSE:
3382                 fc_conf->mode = RTE_FC_RX_PAUSE;
3383                 break;
3384         case I40E_FC_NONE:
3385         default:
3386                 fc_conf->mode = RTE_FC_NONE;
3387         }
3388
3389         return 0;
3390 }
3391
3392 static int
3393 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3394 {
3395         uint32_t mflcn_reg, fctrl_reg, reg;
3396         uint32_t max_high_water;
3397         uint8_t i, aq_failure;
3398         int err;
3399         struct i40e_hw *hw;
3400         struct i40e_pf *pf;
3401         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3402                 [RTE_FC_NONE] = I40E_FC_NONE,
3403                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3404                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3405                 [RTE_FC_FULL] = I40E_FC_FULL
3406         };
3407
3408         /* The high_water field in rte_eth_fc_conf is in kilobyte units */
3409
3410         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3411         if ((fc_conf->high_water > max_high_water) ||
3412                         (fc_conf->high_water < fc_conf->low_water)) {
3413                 PMD_INIT_LOG(ERR,
3414                         "Invalid high/low water setup value in KB, High_water must be <= %d.",
3415                         max_high_water);
3416                 return -EINVAL;
3417         }
3418
3419         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3420         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3421         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3422
3423         pf->fc_conf.pause_time = fc_conf->pause_time;
3424         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3425         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3426
3427         PMD_INIT_FUNC_TRACE();
3428
3429         /* All the link flow control related enable/disable register
3430          * configuration is handled by the F/W
3431          */
3432         err = i40e_set_fc(hw, &aq_failure, true);
3433         if (err < 0)
3434                 return -ENOSYS;
3435
3436         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3437                 /* Configure flow control refresh threshold,
3438                  * the value for stat_tx_pause_refresh_timer[8]
3439                  * is used for global pause operation.
3440                  */
3441
3442                 I40E_WRITE_REG(hw,
3443                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3444                                pf->fc_conf.pause_time);
3445
3446                 /* configure the timer value included in transmitted pause
3447                  * frame,
3448                  * the value for stat_tx_pause_quanta[8] is used for global
3449                  * pause operation
3450                  */
3451                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3452                                pf->fc_conf.pause_time);
3453
3454                 fctrl_reg = I40E_READ_REG(hw,
3455                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3456
3457                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3458                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3459                 else
3460                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3461
3462                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3463                                fctrl_reg);
3464         } else {
3465                 /* Configure pause time (2 TCs per register) */
3466                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3467                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3468                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3469
3470                 /* Configure flow control refresh threshold value */
3471                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3472                                pf->fc_conf.pause_time / 2);
3473
3474                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3475
3476                 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
3477                  * depending on configuration
3478                  */
3479                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
3480                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3481                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3482                 } else {
3483                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3484                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3485                 }
3486
3487                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3488         }
3489
3490         /* Configure the water mark based on both packets and bytes */
3491         I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
3492                        (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3493                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3494         I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
3495                        (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3496                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3497         I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
3498                        pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3499                        << I40E_KILOSHIFT);
3500         I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
3501                        pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3502                        << I40E_KILOSHIFT);
3503
3504         I40E_WRITE_FLUSH(hw);
3505
3506         return 0;
3507 }
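
/* Worked example of the water-mark programming above, using the driver
 * defaults: high_water = 0xF2000 >> I40E_KILOSHIFT = 968 KB.  The global
 * byte threshold written to I40E_GLRPB_GHW is then 968 << 10 = 991232
 * bytes, and the packet threshold written to I40E_GLRPB_PHW is
 * 991232 / I40E_PACKET_AVERAGE_SIZE = 991232 / 128 = 7744 packets.
 */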
3508
3509 static int
3510 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3511                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3512 {
3513         PMD_INIT_FUNC_TRACE();
3514
3515         return -ENOSYS;
3516 }
3517
3518 /* Add a MAC address, and update filters */
3519 static int
3520 i40e_macaddr_add(struct rte_eth_dev *dev,
3521                  struct ether_addr *mac_addr,
3522                  __rte_unused uint32_t index,
3523                  uint32_t pool)
3524 {
3525         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3526         struct i40e_mac_filter_info mac_filter;
3527         struct i40e_vsi *vsi;
3528         int ret;
3529
3530         /* If VMDQ not enabled or configured, return */
3531         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
3532                           !pf->nb_cfg_vmdq_vsi)) {
3533                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
3534                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
3535                         pool);
3536                 return -ENOTSUP;
3537         }
3538
3539         if (pool > pf->nb_cfg_vmdq_vsi) {
3540                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3541                                 pool, pf->nb_cfg_vmdq_vsi);
3542                 return -EINVAL;
3543         }
3544
3545         rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3546         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3547                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3548         else
3549                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3550
3551         if (pool == 0)
3552                 vsi = pf->main_vsi;
3553         else
3554                 vsi = pf->vmdq[pool - 1].vsi;
3555
3556         ret = i40e_vsi_add_mac(vsi, &mac_filter);
3557         if (ret != I40E_SUCCESS) {
3558                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3559                 return -ENODEV;
3560         }
3561         return 0;
3562 }
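
/* Caller-side sketch for adding an address to a VMDQ pool (illustrative;
 * pool 0 is the main VSI, pool N > 0 maps to pf->vmdq[N - 1].vsi):
 *
 *   struct ether_addr addr = { .addr_bytes =
 *           { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *   rte_eth_dev_mac_addr_add(port_id, &addr, 2);  // add to pool 2
 */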
3563
3564 /* Remove a MAC address, and update filters */
3565 static void
3566 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3567 {
3568         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3569         struct i40e_vsi *vsi;
3570         struct rte_eth_dev_data *data = dev->data;
3571         struct ether_addr *macaddr;
3572         int ret;
3573         uint32_t i;
3574         uint64_t pool_sel;
3575
3576         macaddr = &(data->mac_addrs[index]);
3577
3578         pool_sel = dev->data->mac_pool_sel[index];
3579
3580         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3581                 if (pool_sel & (1ULL << i)) {
3582                         if (i == 0)
3583                                 vsi = pf->main_vsi;
3584                         else {
3585                                 /* No VMDQ pool enabled or configured */
3586                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3587                                         (i > pf->nb_cfg_vmdq_vsi)) {
3588                                         PMD_DRV_LOG(ERR,
3589                                                 "No VMDQ pool enabled/configured");
3590                                         return;
3591                                 }
3592                                 vsi = pf->vmdq[i - 1].vsi;
3593                         }
3594                         ret = i40e_vsi_delete_mac(vsi, macaddr);
3595
3596                         if (ret) {
3597                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3598                                 return;
3599                         }
3600                 }
3601         }
3602 }
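
/* pool_sel above is a bitmask of the pools an address was added to;
 * e.g. (illustrative) pool_sel == 0x5 means the address exists in
 * pool 0 (the main VSI, bit 0) and pool 2 (pf->vmdq[1].vsi, bit 2).
 */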
3603
3604 /* Set perfect match or hash match of MAC and VLAN for a VF */
3605 static int
3606 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3607                  struct rte_eth_mac_filter *filter,
3608                  bool add)
3609 {
3610         struct i40e_hw *hw;
3611         struct i40e_mac_filter_info mac_filter;
3612         struct ether_addr old_mac;
3613         struct ether_addr *new_mac;
3614         struct i40e_pf_vf *vf = NULL;
3615         uint16_t vf_id;
3616         int ret;
3617
3618         if (pf == NULL) {
3619                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3620                 return -EINVAL;
3621         }
3622         hw = I40E_PF_TO_HW(pf);
3623
3624         if (filter == NULL) {
3625                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3626                 return -EINVAL;
3627         }
3628
3629         new_mac = &filter->mac_addr;
3630
3631         if (is_zero_ether_addr(new_mac)) {
3632                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3633                 return -EINVAL;
3634         }
3635
3636         vf_id = filter->dst_id;
3637
3638         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
3639                 PMD_DRV_LOG(ERR, "Invalid argument.");
3640                 return -EINVAL;
3641         }
3642         vf = &pf->vfs[vf_id];
3643
3644         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3645                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3646                 return -EINVAL;
3647         }
3648
3649         if (add) {
3650                 rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3651                 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3652                                 ETHER_ADDR_LEN);
3653                 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3654                                  ETHER_ADDR_LEN);
3655
3656                 mac_filter.filter_type = filter->filter_type;
3657                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3658                 if (ret != I40E_SUCCESS) {
3659                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3660                         return -1;
3661                 }
3662                 ether_addr_copy(new_mac, &pf->dev_addr);
3663         } else {
3664                 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3665                                 ETHER_ADDR_LEN);
3666                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3667                 if (ret != I40E_SUCCESS) {
3668                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3669                         return -1;
3670                 }
3671
3672                 /* Clear device address as it has been removed */
3673                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3674                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3675         }
3676
3677         return 0;
3678 }
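
/* Illustrative caller-side setup for this helper (field names from
 * struct rte_eth_mac_filter in rte_eth_ctrl.h):
 *
 *   struct rte_eth_mac_filter f = {
 *           .is_vf = 1,
 *           .dst_id = vf_id,
 *           .filter_type = RTE_MAC_PERFECT_MATCH,
 *   };
 *   ether_addr_copy(&new_addr, &f.mac_addr);
 *   ret = i40e_vf_mac_filter_set(pf, &f, 1);  // 1 = add, 0 = delete
 */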
3679
3680 /* Handle operations on a MAC filter */
3681 static int
3682 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3683                 void *arg)
3684 {
3685         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3686         struct rte_eth_mac_filter *filter;
3687         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3688         int ret = I40E_NOT_SUPPORTED;
3689
3690         filter = (struct rte_eth_mac_filter *)(arg);
3691
3692         switch (filter_op) {
3693         case RTE_ETH_FILTER_NOP:
3694                 ret = I40E_SUCCESS;
3695                 break;
3696         case RTE_ETH_FILTER_ADD:
3697                 i40e_pf_disable_irq0(hw);
3698                 if (filter->is_vf)
3699                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
3700                 i40e_pf_enable_irq0(hw);
3701                 break;
3702         case RTE_ETH_FILTER_DELETE:
3703                 i40e_pf_disable_irq0(hw);
3704                 if (filter->is_vf)
3705                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
3706                 i40e_pf_enable_irq0(hw);
3707                 break;
3708         default:
3709                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3710                 ret = I40E_ERR_PARAM;
3711                 break;
3712         }
3713
3714         return ret;
3715 }
3716
3717 static int
3718 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3719 {
3720         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3721         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3722         int ret;
3723
3724         if (!lut)
3725                 return -EINVAL;
3726
3727         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3728                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
3729                                           lut, lut_size);
3730                 if (ret) {
3731                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3732                         return ret;
3733                 }
3734         } else {
3735                 uint32_t *lut_dw = (uint32_t *)lut;
3736                 uint16_t i, lut_size_dw = lut_size / 4;
3737
3738                 for (i = 0; i < lut_size_dw; i++)
3739                         lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
3740         }
3741
3742         return 0;
3743 }
3744
3745 static int
3746 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3747 {
3748         struct i40e_pf *pf;
3749         struct i40e_hw *hw;
3750         int ret;
3751
3752         if (!vsi || !lut)
3753                 return -EINVAL;
3754
3755         pf = I40E_VSI_TO_PF(vsi);
3756         hw = I40E_VSI_TO_HW(vsi);
3757
3758         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3759                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
3760                                           lut, lut_size);
3761                 if (ret) {
3762                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3763                         return ret;
3764                 }
3765         } else {
3766                 uint32_t *lut_dw = (uint32_t *)lut;
3767                 uint16_t i, lut_size_dw = lut_size / 4;
3768
3769                 for (i = 0; i < lut_size_dw; i++)
3770                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
3771                 I40E_WRITE_FLUSH(hw);
3772         }
3773
3774         return 0;
3775 }
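
/*
 * Illustrative note on the register-based fallback above: each
 * I40E_PFQF_HLUT register holds four 8-bit LUT entries, so the
 * byte-wise LUT is read/written one dword (four entries) at a time.
 * A minimal sketch of the packing on a little-endian host:
 *
 *     uint8_t lut[4] = { q0, q1, q2, q3 };
 *     uint32_t dw = *(const uint32_t *)lut;
 *     // dw == q0 | (q1 << 8) | (q2 << 16) | (q3 << 24)
 *
 * which is why lut_size is assumed to be a multiple of 4 here.
 */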
3776
3777 static int
3778 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
3779                          struct rte_eth_rss_reta_entry64 *reta_conf,
3780                          uint16_t reta_size)
3781 {
3782         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3783         uint16_t i, lut_size = pf->hash_lut_size;
3784         uint16_t idx, shift;
3785         uint8_t *lut;
3786         int ret;
3787
3788         if (reta_size != lut_size ||
3789                 reta_size > ETH_RSS_RETA_SIZE_512) {
3790                 PMD_DRV_LOG(ERR,
3791                         "The size of the configured hash lookup table (%d) doesn't match the number the hardware can support (%d)",
3792                         reta_size, lut_size);
3793                 return -EINVAL;
3794         }
3795
3796         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3797         if (!lut) {
3798                 PMD_DRV_LOG(ERR, "Failed to allocate memory for RSS LUT");
3799                 return -ENOMEM;
3800         }
3801         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3802         if (ret)
3803                 goto out;
3804         for (i = 0; i < reta_size; i++) {
3805                 idx = i / RTE_RETA_GROUP_SIZE;
3806                 shift = i % RTE_RETA_GROUP_SIZE;
3807                 if (reta_conf[idx].mask & (1ULL << shift))
3808                         lut[i] = reta_conf[idx].reta[shift];
3809         }
3810         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
3811
3812 out:
3813         rte_free(lut);
3814
3815         return ret;
3816 }
3817
3818 static int
3819 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
3820                         struct rte_eth_rss_reta_entry64 *reta_conf,
3821                         uint16_t reta_size)
3822 {
3823         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3824         uint16_t i, lut_size = pf->hash_lut_size;
3825         uint16_t idx, shift;
3826         uint8_t *lut;
3827         int ret;
3828
3829         if (reta_size != lut_size ||
3830                 reta_size > ETH_RSS_RETA_SIZE_512) {
3831                 PMD_DRV_LOG(ERR,
3832                         "The size of the configured hash lookup table (%d) doesn't match the number the hardware can support (%d)",
3833                         reta_size, lut_size);
3834                 return -EINVAL;
3835         }
3836
3837         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3838         if (!lut) {
3839                 PMD_DRV_LOG(ERR, "Failed to allocate memory for RSS LUT");
3840                 return -ENOMEM;
3841         }
3842
3843         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3844         if (ret)
3845                 goto out;
3846         for (i = 0; i < reta_size; i++) {
3847                 idx = i / RTE_RETA_GROUP_SIZE;
3848                 shift = i % RTE_RETA_GROUP_SIZE;
3849                 if (reta_conf[idx].mask & (1ULL << shift))
3850                         reta_conf[idx].reta[shift] = lut[i];
3851         }
3852
3853 out:
3854         rte_free(lut);
3855
3856         return ret;
3857 }
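
/*
 * Usage sketch for the RETA layout consumed by the two functions above
 * (illustrative only; uses the public ethdev API). Entry i of the table
 * lives in group i / RTE_RETA_GROUP_SIZE at slot i % RTE_RETA_GROUP_SIZE,
 * and is only applied when the group's mask bit for that slot is set.
 * For example, steering all 512 entries to queue 0:
 *
 *     struct rte_eth_rss_reta_entry64 conf[512 / RTE_RETA_GROUP_SIZE];
 *     int i;
 *
 *     memset(conf, 0, sizeof(conf));
 *     for (i = 0; i < 512; i++) {
 *             conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] = 0;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, conf, 512);
 */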
3858
3859 /**
3860  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
3861  * @hw:   pointer to the HW structure
3862  * @mem:  pointer to mem struct to fill out
3863  * @size: size of memory requested
3864  * @alignment: what to align the allocation to
3865  **/
3866 enum i40e_status_code
3867 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3868                         struct i40e_dma_mem *mem,
3869                         u64 size,
3870                         u32 alignment)
3871 {
3872         const struct rte_memzone *mz = NULL;
3873         char z_name[RTE_MEMZONE_NAMESIZE];
3874
3875         if (!mem)
3876                 return I40E_ERR_PARAM;
3877
3878         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
3879         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
3880                                          alignment, RTE_PGSIZE_2M);
3881         if (!mz)
3882                 return I40E_ERR_NO_MEMORY;
3883
3884         mem->size = size;
3885         mem->va = mz->addr;
3886         mem->pa = mz->iova;
3887         mem->zone = (const void *)mz;
3888         PMD_DRV_LOG(DEBUG,
3889                 "memzone %s allocated with physical address: %"PRIu64,
3890                 mz->name, mem->pa);
3891
3892         return I40E_SUCCESS;
3893 }
3894
3895 /**
3896  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
3897  * @hw:   pointer to the HW structure
3898  * @mem:  ptr to mem struct to free
3899  **/
3900 enum i40e_status_code
3901 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3902                     struct i40e_dma_mem *mem)
3903 {
3904         if (!mem)
3905                 return I40E_ERR_PARAM;
3906
3907         PMD_DRV_LOG(DEBUG,
3908                 "memzone %s to be freed with physical address: %"PRIu64,
3909                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
3910         rte_memzone_free((const struct rte_memzone *)mem->zone);
3911         mem->zone = NULL;
3912         mem->va = NULL;
3913         mem->pa = (u64)0;
3914
3915         return I40E_SUCCESS;
3916 }
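
/*
 * The two helpers above are the DMA hooks used by the shared base code
 * and are expected to be called strictly in pairs. A minimal usage
 * sketch (illustrative only; error handling elided):
 *
 *     struct i40e_dma_mem mem;
 *
 *     if (i40e_allocate_dma_mem_d(hw, &mem, 4096, 64) == I40E_SUCCESS) {
 *             // mem.va is the CPU virtual address, mem.pa the IOVA
 *             // to be programmed into the hardware
 *             i40e_free_dma_mem_d(hw, &mem);
 *     }
 */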
3917
3918 /**
3919  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
3920  * @hw:   pointer to the HW structure
3921  * @mem:  pointer to mem struct to fill out
3922  * @size: size of memory requested
3923  **/
3924 enum i40e_status_code
3925 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3926                          struct i40e_virt_mem *mem,
3927                          u32 size)
3928 {
3929         if (!mem)
3930                 return I40E_ERR_PARAM;
3931
3932         mem->size = size;
3933         mem->va = rte_zmalloc("i40e", size, 0);
3934
3935         if (mem->va)
3936                 return I40E_SUCCESS;
3937         else
3938                 return I40E_ERR_NO_MEMORY;
3939 }
3940
3941 /**
3942  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
3943  * @hw:   pointer to the HW structure
3944  * @mem:  pointer to mem struct to free
3945  **/
3946 enum i40e_status_code
3947 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3948                      struct i40e_virt_mem *mem)
3949 {
3950         if (!mem)
3951                 return I40E_ERR_PARAM;
3952
3953         rte_free(mem->va);
3954         mem->va = NULL;
3955
3956         return I40E_SUCCESS;
3957 }
3958
3959 void
3960 i40e_init_spinlock_d(struct i40e_spinlock *sp)
3961 {
3962         rte_spinlock_init(&sp->spinlock);
3963 }
3964
3965 void
3966 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
3967 {
3968         rte_spinlock_lock(&sp->spinlock);
3969 }
3970
3971 void
3972 i40e_release_spinlock_d(struct i40e_spinlock *sp)
3973 {
3974         rte_spinlock_unlock(&sp->spinlock);
3975 }
3976
3977 void
3978 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
3979 {
3980         return;
3981 }
3982
3983 /**
3984  * Get the hardware capabilities, which will be parsed
3985  * and saved into struct i40e_hw.
3986  */
3987 static int
3988 i40e_get_cap(struct i40e_hw *hw)
3989 {
3990         struct i40e_aqc_list_capabilities_element_resp *buf;
3991         uint16_t len, size = 0;
3992         int ret;
3993
3994         /* Calculate a buffer size large enough to hold the response data temporarily */
3995         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
3996                                                 I40E_MAX_CAP_ELE_NUM;
3997         buf = rte_zmalloc("i40e", len, 0);
3998         if (!buf) {
3999                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4000                 return I40E_ERR_NO_MEMORY;
4001         }
4002
4003         /* Get and parse the capabilities, then save them to hw */
4004         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4005                         i40e_aqc_opc_list_func_capabilities, NULL);
4006         if (ret != I40E_SUCCESS)
4007                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4008
4009         /* Free the temporary buffer after being used */
4010         rte_free(buf);
4011
4012         return ret;
4013 }
4014
4015 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4016 #define QUEUE_NUM_PER_VF_ARG                    "queue-num-per-vf"
4017 RTE_PMD_REGISTER_PARAM_STRING(net_i40e, QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16");
4018
4019 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4020                 const char *value,
4021                 void *opaque)
4022 {
4023         struct i40e_pf *pf;
4024         unsigned long num;
4025         char *end;
4026
4027         pf = (struct i40e_pf *)opaque;
4028         RTE_SET_USED(key);
4029
4030         errno = 0;
4031         num = strtoul(value, &end, 0);
4032         if (errno != 0 || end == value || *end != 0) {
4033                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s; keeping "
4034                             "the current value = %hu", value, pf->vf_nb_qp_max);
4035                 return -(EINVAL);
4036         }
4037
4038         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4039                 pf->vf_nb_qp_max = (uint16_t)num;
4040         else
4041                 /* Return 0 so that a later valid occurrence of the same argument still works */
4042                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu; it must be "
4043                             "a power of 2 and no greater than 16. Keeping "
4044                             "the current value = %hu", num, pf->vf_nb_qp_max);
4045
4046         return 0;
4047 }
4048
4049 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4050 {
4051         static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
4052         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4053         struct rte_kvargs *kvlist;
4054
4055         /* set default queue number per VF as 4 */
4056         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4057
4058         if (dev->device->devargs == NULL)
4059                 return 0;
4060
4061         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4062         if (kvlist == NULL)
4063                 return -(EINVAL);
4064
4065         if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
4066                 PMD_DRV_LOG(WARNING, "Argument \"%s\" given more than once; "
4067                             "only the first invalid or the last valid value takes effect!",
4068                             QUEUE_NUM_PER_VF_ARG);
4069
4070         rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
4071                            i40e_pf_parse_vf_queue_number_handler, pf);
4072
4073         rte_kvargs_free(kvlist);
4074
4075         return 0;
4076 }
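
/*
 * Usage sketch for the devargs parsed above (illustrative; the
 * command-line syntax below is an assumption based on the PCI device
 * argument convention of this DPDK era):
 *
 *     testpmd -w 0000:02:00.0,queue-num-per-vf=8 -- -i
 *
 * Values that are not a power of 2 or exceed 16 are rejected by the
 * handler above and the default of 4 queues per VF is kept.
 */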
4077
4078 static int
4079 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4080 {
4081         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4082         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4083         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4084         uint16_t qp_count = 0, vsi_count = 0;
4085
4086         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4087                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4088                 return -EINVAL;
4089         }
4090
4091         i40e_pf_config_vf_rxq_number(dev);
4092
4093         /* Add the parameter init for LFC */
4094         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4095         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4096         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4097
4098         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4099         pf->max_num_vsi = hw->func_caps.num_vsis;
4100         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4101         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4102
4103         /* FDir queue/VSI allocation */
4104         pf->fdir_qp_offset = 0;
4105         if (hw->func_caps.fd) {
4106                 pf->flags |= I40E_FLAG_FDIR;
4107                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4108         } else {
4109                 pf->fdir_nb_qps = 0;
4110         }
4111         qp_count += pf->fdir_nb_qps;
4112         vsi_count += 1;
4113
4114         /* LAN queue/VSI allocation */
4115         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4116         if (!hw->func_caps.rss) {
4117                 pf->lan_nb_qps = 1;
4118         } else {
4119                 pf->flags |= I40E_FLAG_RSS;
4120                 if (hw->mac.type == I40E_MAC_X722)
4121                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4122                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4123         }
4124         qp_count += pf->lan_nb_qps;
4125         vsi_count += 1;
4126
4127         /* VF queue/VSI allocation */
4128         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4129         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4130                 pf->flags |= I40E_FLAG_SRIOV;
4131                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4132                 pf->vf_num = pci_dev->max_vfs;
4133                 PMD_DRV_LOG(DEBUG,
4134                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4135                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4136         } else {
4137                 pf->vf_nb_qps = 0;
4138                 pf->vf_num = 0;
4139         }
4140         qp_count += pf->vf_nb_qps * pf->vf_num;
4141         vsi_count += pf->vf_num;
4142
4143         /* VMDq queue/VSI allocation */
4144         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4145         pf->vmdq_nb_qps = 0;
4146         pf->max_nb_vmdq_vsi = 0;
4147         if (hw->func_caps.vmdq) {
4148                 if (qp_count < hw->func_caps.num_tx_qp &&
4149                         vsi_count < hw->func_caps.num_vsis) {
4150                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4151                                 qp_count) / pf->vmdq_nb_qp_max;
4152
4153                         /* Limit the maximum number of VMDq vsi to the maximum
4154                          * ethdev can support
4155                          */
4156                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4157                                 hw->func_caps.num_vsis - vsi_count);
4158                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4159                                 ETH_64_POOLS);
4160                         if (pf->max_nb_vmdq_vsi) {
4161                                 pf->flags |= I40E_FLAG_VMDQ;
4162                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4163                                 PMD_DRV_LOG(DEBUG,
4164                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4165                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4166                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4167                         } else {
4168                                 PMD_DRV_LOG(INFO,
4169                                         "Not enough queues left for VMDq");
4170                         }
4171                 } else {
4172                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4173                 }
4174         }
4175         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4176         vsi_count += pf->max_nb_vmdq_vsi;
4177
4178         if (hw->func_caps.dcb)
4179                 pf->flags |= I40E_FLAG_DCB;
4180
4181         if (qp_count > hw->func_caps.num_tx_qp) {
4182                 PMD_DRV_LOG(ERR,
4183                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4184                         qp_count, hw->func_caps.num_tx_qp);
4185                 return -EINVAL;
4186         }
4187         if (vsi_count > hw->func_caps.num_vsis) {
4188                 PMD_DRV_LOG(ERR,
4189                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4190                         vsi_count, hw->func_caps.num_vsis);
4191                 return -EINVAL;
4192         }
4193
4194         return 0;
4195 }
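
/*
 * Worked example of the queue/VSI budget computed above, under assumed
 * capabilities num_tx_qp = 1536 and num_vsis = 384, with FDIR enabled,
 * 64 LAN queues, 32 VFs at 4 queues each and 4 queues per VMDq pool:
 *
 *     qp_count  = 1 (FDIR) + 64 (LAN) + 32 * 4 (VF)          = 193
 *     vsi_count = 1 (FDIR) + 1 (LAN)  + 32 (VF)              = 34
 *     max_nb_vmdq_vsi = min((1536 - 193) / 4,
 *                           384 - 34, ETH_64_POOLS)          = 64
 *
 * so up to 64 VMDq pools of 4 queues each still fit in the budget.
 */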
4196
4197 static int
4198 i40e_pf_get_switch_config(struct i40e_pf *pf)
4199 {
4200         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4201         struct i40e_aqc_get_switch_config_resp *switch_config;
4202         struct i40e_aqc_switch_config_element_resp *element;
4203         uint16_t start_seid = 0, num_reported;
4204         int ret;
4205
4206         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4207                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4208         if (!switch_config) {
4209                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4210                 return -ENOMEM;
4211         }
4212
4213         /* Get the switch configurations */
4214         ret = i40e_aq_get_switch_config(hw, switch_config,
4215                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4216         if (ret != I40E_SUCCESS) {
4217                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4218                 goto fail;
4219         }
4220         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4221         if (num_reported != 1) { /* The number should be 1 */
4222                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4223                 goto fail;
4224         }
4225
4226         /* Parse the switch configuration elements */
4227         element = &(switch_config->element[0]);
4228         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4229                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4230                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4231         } else
4232                 PMD_DRV_LOG(INFO, "Unknown element type");
4233
4234 fail:
4235         rte_free(switch_config);
4236
4237         return ret;
4238 }
4239
4240 static int
4241 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4242                         uint32_t num)
4243 {
4244         struct pool_entry *entry;
4245
4246         if (pool == NULL || num == 0)
4247                 return -EINVAL;
4248
4249         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4250         if (entry == NULL) {
4251                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4252                 return -ENOMEM;
4253         }
4254
4255         /* Initialize the queue heap */
4256         pool->num_free = num;
4257         pool->num_alloc = 0;
4258         pool->base = base;
4259         LIST_INIT(&pool->alloc_list);
4260         LIST_INIT(&pool->free_list);
4261
4262         /* Initialize the element */
4263         entry->base = 0;
4264         entry->len = num;
4265
4266         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4267         return 0;
4268 }
4269
4270 static void
4271 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4272 {
4273         struct pool_entry *entry, *next_entry;
4274
4275         if (pool == NULL)
4276                 return;
4277
4278         for (entry = LIST_FIRST(&pool->alloc_list);
4279                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4280                         entry = next_entry) {
4281                 LIST_REMOVE(entry, next);
4282                 rte_free(entry);
4283         }
4284
4285         for (entry = LIST_FIRST(&pool->free_list);
4286                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4287                         entry = next_entry) {
4288                 LIST_REMOVE(entry, next);
4289                 rte_free(entry);
4290         }
4291
4292         pool->num_free = 0;
4293         pool->num_alloc = 0;
4294         pool->base = 0;
4295         LIST_INIT(&pool->alloc_list);
4296         LIST_INIT(&pool->free_list);
4297 }
4298
4299 static int
4300 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4301                        uint32_t base)
4302 {
4303         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4304         uint32_t pool_offset;
4305         int insert;
4306
4307         if (pool == NULL) {
4308                 PMD_DRV_LOG(ERR, "Invalid parameter");
4309                 return -EINVAL;
4310         }
4311
4312         pool_offset = base - pool->base;
4313         /* Lookup in alloc list */
4314         LIST_FOREACH(entry, &pool->alloc_list, next) {
4315                 if (entry->base == pool_offset) {
4316                         valid_entry = entry;
4317                         LIST_REMOVE(entry, next);
4318                         break;
4319                 }
4320         }
4321
4322         /* Not found, return */
4323         if (valid_entry == NULL) {
4324                 PMD_DRV_LOG(ERR, "Failed to find entry");
4325                 return -EINVAL;
4326         }
4327
4328         /**
4329          * Found it; move it to the free list and try to merge.
4330          * The free list is kept sorted by base to make merging easier.
4331          * Find the adjacent prev and next entries.
4332          */
4333         prev = next = NULL;
4334         LIST_FOREACH(entry, &pool->free_list, next) {
4335                 if (entry->base > valid_entry->base) {
4336                         next = entry;
4337                         break;
4338                 }
4339                 prev = entry;
4340         }
4341
4342         insert = 0;
4343         /* Try to merge with the next entry */
4344         if (next != NULL) {
4345                 /* Merge with next one */
4346                 if (valid_entry->base + valid_entry->len == next->base) {
4347                         next->base = valid_entry->base;
4348                         next->len += valid_entry->len;
4349                         rte_free(valid_entry);
4350                         valid_entry = next;
4351                         insert = 1;
4352                 }
4353         }
4354
4355         if (prev != NULL) {
4356                 /* Merge with previous one */
4357                 if (prev->base + prev->len == valid_entry->base) {
4358                         prev->len += valid_entry->len;
4359                         /* If already merged with the next entry, remove that node */
4360                         if (insert == 1) {
4361                                 LIST_REMOVE(valid_entry, next);
4362                                 rte_free(valid_entry);
4363                         } else {
4364                                 rte_free(valid_entry);
4365                                 insert = 1;
4366                         }
4367                 }
4368         }
4369
4370         /* No adjacent entry to merge with; insert as a new node */
4371         if (insert == 0) {
4372                 if (prev != NULL)
4373                         LIST_INSERT_AFTER(prev, valid_entry, next);
4374                 else if (next != NULL)
4375                         LIST_INSERT_BEFORE(next, valid_entry, next);
4376                 else /* It's empty list, insert to head */
4377                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4378         }
4379
4380         pool->num_free += valid_entry->len;
4381         pool->num_alloc -= valid_entry->len;
4382
4383         return 0;
4384 }
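
/*
 * Worked example of the merge logic above: suppose the free list holds
 * [base=0, len=8] and [base=16, len=8], and the block [base=8, len=8]
 * is freed:
 *
 *     prev = [0,8], next = [16,8]
 *     8 + 8 == 16  ->  merge into next:  next becomes [8,16]
 *     0 + 8 == 8   ->  merge into prev:  prev becomes [0,24],
 *                      and the already-merged next node is removed
 *
 * leaving a single free entry [base=0, len=24].
 */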
4385
4386 static int
4387 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4388                        uint16_t num)
4389 {
4390         struct pool_entry *entry, *valid_entry;
4391
4392         if (pool == NULL || num == 0) {
4393                 PMD_DRV_LOG(ERR, "Invalid parameter");
4394                 return -EINVAL;
4395         }
4396
4397         if (pool->num_free < num) {
4398                 PMD_DRV_LOG(ERR, "Insufficient resources: requested %u, available %u",
4399                             num, pool->num_free);
4400                 return -ENOMEM;
4401         }
4402
4403         valid_entry = NULL;
4404         /* Look up the free list and find the best-fit entry */
4405         LIST_FOREACH(entry, &pool->free_list, next) {
4406                 if (entry->len >= num) {
4407                         /* Find best one */
4408                         if (entry->len == num) {
4409                                 valid_entry = entry;
4410                                 break;
4411                         }
4412                         if (valid_entry == NULL || valid_entry->len > entry->len)
4413                                 valid_entry = entry;
4414                 }
4415         }
4416
4417         /* No entry can satisfy the request; return */
4418         if (valid_entry == NULL) {
4419                 PMD_DRV_LOG(ERR, "No valid entry found");
4420                 return -ENOMEM;
4421         }
4422         /**
4423          * The entry has exactly the requested number of queues;
4424          * remove it from the free list.
4425          */
4426         if (valid_entry->len == num) {
4427                 LIST_REMOVE(valid_entry, next);
4428         } else {
4429                 /**
4430                  * The entry has more queues than requested; create a
4431                  * new entry for the alloc list and shrink the base and
4432                  * length of the remaining free-list entry.
4433                  */
4434                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4435                 if (entry == NULL) {
4436                         PMD_DRV_LOG(ERR,
4437                                 "Failed to allocate memory for resource pool");
4438                         return -ENOMEM;
4439                 }
4440                 entry->base = valid_entry->base;
4441                 entry->len = num;
4442                 valid_entry->base += num;
4443                 valid_entry->len -= num;
4444                 valid_entry = entry;
4445         }
4446
4447         /* Insert it into alloc list, not sorted */
4448         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4449
4450         pool->num_free -= valid_entry->len;
4451         pool->num_alloc += valid_entry->len;
4452
4453         return valid_entry->base + pool->base;
4454 }
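
/*
 * The allocation above is best-fit over the free list. A small worked
 * example: with free entries [base=0, len=4] and [base=8, len=16], a
 * request for num = 4 takes [0,4] whole (exact fit, the node simply
 * moves to the alloc list), while a request for num = 6 splits [8,16]
 * into an allocated [8,6] and a remaining free [14,10]. The value
 * returned to the caller is valid_entry->base + pool->base.
 */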
4455
4456 /**
4457  * bitmap_is_subset - Check whether src2 is a subset of src1
4458  **/
4459 static inline int
4460 bitmap_is_subset(uint8_t src1, uint8_t src2)
4461 {
4462         return !((src1 ^ src2) & src2);
4463 }
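
/*
 * Worked example: !((src1 ^ src2) & src2) is non-zero exactly when
 * every bit set in src2 is also set in src1:
 *
 *     bitmap_is_subset(0x07, 0x05) == 1   // bits 0,2 are within 0x07
 *     bitmap_is_subset(0x07, 0x09) == 0   // bit 3 is not set in 0x07
 */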
4464
4465 static enum i40e_status_code
4466 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4467 {
4468         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4469
4470         /* If DCB is not supported, only default TC is supported */
4471         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4472                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
4473                 return I40E_NOT_SUPPORTED;
4474         }
4475
4476         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4477                 PMD_DRV_LOG(ERR,
4478                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
4479                         hw->func_caps.enabled_tcmap, enabled_tcmap);
4480                 return I40E_NOT_SUPPORTED;
4481         }
4482         return I40E_SUCCESS;
4483 }
4484
4485 int
4486 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4487                                 struct i40e_vsi_vlan_pvid_info *info)
4488 {
4489         struct i40e_hw *hw;
4490         struct i40e_vsi_context ctxt;
4491         uint8_t vlan_flags = 0;
4492         int ret;
4493
4494         if (vsi == NULL || info == NULL) {
4495                 PMD_DRV_LOG(ERR, "invalid parameters");
4496                 return I40E_ERR_PARAM;
4497         }
4498
4499         if (info->on) {
4500                 vsi->info.pvid = info->config.pvid;
4501                 /**
4502                  * If PVID insertion is enabled, only tagged packets
4503                  * are allowed to be sent out.
4504                  */
4505                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
4506                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4507         } else {
4508                 vsi->info.pvid = 0;
4509                 if (info->config.reject.tagged == 0)
4510                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4511
4512                 if (info->config.reject.untagged == 0)
4513                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
4514         }
4515         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
4516                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
4517         vsi->info.port_vlan_flags |= vlan_flags;
4518         vsi->info.valid_sections =
4519                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4520         memset(&ctxt, 0, sizeof(ctxt));
4521         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4522         ctxt.seid = vsi->seid;
4523
4524         hw = I40E_VSI_TO_HW(vsi);
4525         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4526         if (ret != I40E_SUCCESS)
4527                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
4528
4529         return ret;
4530 }
4531
4532 static int
4533 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4534 {
4535         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4536         int i, ret;
4537         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
4538
4539         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4540         if (ret != I40E_SUCCESS)
4541                 return ret;
4542
4543         if (!vsi->seid) {
4544                 PMD_DRV_LOG(ERR, "seid not valid");
4545                 return -EINVAL;
4546         }
4547
4548         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
4549         tc_bw_data.tc_valid_bits = enabled_tcmap;
4550         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4551                 tc_bw_data.tc_bw_credits[i] =
4552                         (enabled_tcmap & (1 << i)) ? 1 : 0;
4553
4554         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
4555         if (ret != I40E_SUCCESS) {
4556                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
4557                 return ret;
4558         }
4559
4560         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
4561                                         sizeof(vsi->info.qs_handle));
4562         return I40E_SUCCESS;
4563 }
4564
4565 static enum i40e_status_code
4566 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
4567                                  struct i40e_aqc_vsi_properties_data *info,
4568                                  uint8_t enabled_tcmap)
4569 {
4570         enum i40e_status_code ret;
4571         int i, total_tc = 0;
4572         uint16_t qpnum_per_tc, bsf, qp_idx;
4573
4574         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4575         if (ret != I40E_SUCCESS)
4576                 return ret;
4577
4578         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4579                 if (enabled_tcmap & (1 << i))
4580                         total_tc++;
4581         if (total_tc == 0)
4582                 total_tc = 1;
4583         vsi->enabled_tc = enabled_tcmap;
4584
4585         /* Number of queues per enabled TC */
4586         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
4587         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
4588         bsf = rte_bsf32(qpnum_per_tc);
4589
4590         /* Adjust the queue number to actual queues that can be applied */
4591         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4592                 vsi->nb_qps = qpnum_per_tc * total_tc;
4593
4594         /**
4595          * Configure TC and queue mapping parameters. Each enabled TC
4596          * is allocated qpnum_per_tc queues; traffic of a disabled TC
4597          * is served by the default queue.
4598          */
4599         qp_idx = 0;
4600         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4601                 if (vsi->enabled_tc & (1 << i)) {
4602                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4603                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4604                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4605                         qp_idx += qpnum_per_tc;
4606                 } else
4607                         info->tc_mapping[i] = 0;
4608         }
4609
4610         /* Associate queue number with VSI */
4611         if (vsi->type == I40E_VSI_SRIOV) {
4612                 info->mapping_flags |=
4613                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4614                 for (i = 0; i < vsi->nb_qps; i++)
4615                         info->queue_mapping[i] =
4616                                 rte_cpu_to_le_16(vsi->base_queue + i);
4617         } else {
4618                 info->mapping_flags |=
4619                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4620                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4621         }
4622         info->valid_sections |=
4623                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4624
4625         return I40E_SUCCESS;
4626 }
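
/*
 * Worked example of the tc_mapping encoding built above, for
 * nb_qps = 16 and two enabled TCs (total_tc = 2):
 *
 *     qpnum_per_tc = 16 / 2 = 8,  bsf = rte_bsf32(8) = 3
 *     TC0: tc_mapping = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                       (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 *     TC1: tc_mapping = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                       (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 *
 * i.e. each enabled TC records its first queue offset and the log2 of
 * its queue count (2^3 = 8 queues per TC).
 */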
4627
4628 static int
4629 i40e_veb_release(struct i40e_veb *veb)
4630 {
4631         struct i40e_vsi *vsi;
4632         struct i40e_hw *hw;
4633
4634         if (veb == NULL)
4635                 return -EINVAL;
4636
4637         if (!TAILQ_EMPTY(&veb->head)) {
4638                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4639                 return -EACCES;
4640         }
4641         /* associate_vsi field is NULL for floating VEB */
4642         if (veb->associate_vsi != NULL) {
4643                 vsi = veb->associate_vsi;
4644                 hw = I40E_VSI_TO_HW(vsi);
4645
4646                 vsi->uplink_seid = veb->uplink_seid;
4647                 vsi->veb = NULL;
4648         } else {
4649                 veb->associate_pf->main_vsi->floating_veb = NULL;
4650                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4651         }
4652
4653         i40e_aq_delete_element(hw, veb->seid, NULL);
4654         rte_free(veb);
4655         return I40E_SUCCESS;
4656 }
4657
4658 /* Setup a veb */
4659 static struct i40e_veb *
4660 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4661 {
4662         struct i40e_veb *veb;
4663         int ret;
4664         struct i40e_hw *hw;
4665
4666         if (pf == NULL) {
4667                 PMD_DRV_LOG(ERR,
4668                             "VEB setup failed, associated PF shouldn't be NULL");
4669                 return NULL;
4670         }
4671         hw = I40E_PF_TO_HW(pf);
4672
4673         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4674         if (!veb) {
4675                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4676                 goto fail;
4677         }
4678
4679         veb->associate_vsi = vsi;
4680         veb->associate_pf = pf;
4681         TAILQ_INIT(&veb->head);
4682         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4683
4684         /* create floating veb if vsi is NULL */
4685         if (vsi != NULL) {
4686                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4687                                       I40E_DEFAULT_TCMAP, false,
4688                                       &veb->seid, false, NULL);
4689         } else {
4690                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4691                                       true, &veb->seid, false, NULL);
4692         }
4693
4694         if (ret != I40E_SUCCESS) {
4695                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4696                             hw->aq.asq_last_status);
4697                 goto fail;
4698         }
4699         veb->enabled_tc = I40E_DEFAULT_TCMAP;
4700
4701         /* get statistics index */
4702         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4703                                 &veb->stats_idx, NULL, NULL, NULL);
4704         if (ret != I40E_SUCCESS) {
4705                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
4706                             hw->aq.asq_last_status);
4707                 goto fail;
4708         }
4709         /* Get VEB bandwidth, to be implemented */
4710         /* Now associated vsi binding to the VEB, set uplink to this VEB */
4711         if (vsi)
4712                 vsi->uplink_seid = veb->seid;
4713
4714         return veb;
4715 fail:
4716         rte_free(veb);
4717         return NULL;
4718 }
4719
4720 int
4721 i40e_vsi_release(struct i40e_vsi *vsi)
4722 {
4723         struct i40e_pf *pf;
4724         struct i40e_hw *hw;
4725         struct i40e_vsi_list *vsi_list;
4726         void *temp;
4727         int ret;
4728         struct i40e_mac_filter *f;
4729         uint16_t user_param;
4730
4731         if (!vsi)
4732                 return I40E_SUCCESS;
4733
4734         if (!vsi->adapter)
4735                 return -EFAULT;
4736
4737         user_param = vsi->user_param;
4738
4739         pf = I40E_VSI_TO_PF(vsi);
4740         hw = I40E_VSI_TO_HW(vsi);
4741
4742         /* VSI has children attached; release the children first */
4743         if (vsi->veb) {
4744                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
4745                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4746                                 return -1;
4747                 }
4748                 i40e_veb_release(vsi->veb);
4749         }
4750
4751         if (vsi->floating_veb) {
4752                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
4753                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4754                                 return -1;
4755                 }
4756         }
4757
4758         /* Remove all macvlan filters of the VSI */
4759         i40e_vsi_remove_all_macvlan_filter(vsi);
4760         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
4761                 rte_free(f);
4762
4763         if (vsi->type != I40E_VSI_MAIN &&
4764             ((vsi->type != I40E_VSI_SRIOV) ||
4765             !pf->floating_veb_list[user_param])) {
4766                 /* Remove vsi from parent's sibling list */
4767                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
4768                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
4769                         return I40E_ERR_PARAM;
4770                 }
4771                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
4772                                 &vsi->sib_vsi_list, list);
4773
4774                 /* Remove all switch elements of the VSI */
4775                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4776                 if (ret != I40E_SUCCESS)
4777                         PMD_DRV_LOG(ERR, "Failed to delete element");
4778         }
4779
4780         if ((vsi->type == I40E_VSI_SRIOV) &&
4781             pf->floating_veb_list[user_param]) {
4782                 /* Remove vsi from parent's sibling list */
4783                 if (vsi->parent_vsi == NULL ||
4784                     vsi->parent_vsi->floating_veb == NULL) {
4785                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
4786                         return I40E_ERR_PARAM;
4787                 }
4788                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
4789                              &vsi->sib_vsi_list, list);
4790
4791                 /* Remove all switch elements of the VSI */
4792                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4793                 if (ret != I40E_SUCCESS)
4794                         PMD_DRV_LOG(ERR, "Failed to delete element");
4795         }
4796
4797         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4798
4799         if (vsi->type != I40E_VSI_SRIOV)
4800                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
4801         rte_free(vsi);
4802
4803         return I40E_SUCCESS;
4804 }
4805
4806 static int
4807 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
4808 {
4809         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4810         struct i40e_aqc_remove_macvlan_element_data def_filter;
4811         struct i40e_mac_filter_info filter;
4812         int ret;
4813
4814         if (vsi->type != I40E_VSI_MAIN)
4815                 return I40E_ERR_CONFIG;
4816         memset(&def_filter, 0, sizeof(def_filter));
4817         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
4818                                         ETH_ADDR_LEN);
4819         def_filter.vlan_tag = 0;
4820         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4821                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4822         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
4823         if (ret != I40E_SUCCESS) {
4824                 struct i40e_mac_filter *f;
4825                 struct ether_addr *mac;
4826
4827                 PMD_DRV_LOG(DEBUG,
4828                             "Cannot remove the default macvlan filter");
4829                 /* The permanent MAC needs to be added to the MAC list */
4830                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4831                 if (f == NULL) {
4832                         PMD_DRV_LOG(ERR, "failed to allocate memory");
4833                         return I40E_ERR_NO_MEMORY;
4834                 }
4835                 mac = &f->mac_info.mac_addr;
4836                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
4837                                 ETH_ADDR_LEN);
4838                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4839                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4840                 vsi->mac_num++;
4841
4842                 return ret;
4843         }
4844         rte_memcpy(&filter.mac_addr,
4845                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
4846         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4847         return i40e_vsi_add_mac(vsi, &filter);
4848 }
4849
4850 /*
4851  * i40e_vsi_get_bw_config - Query VSI BW Information
4852  * @vsi: the VSI to be queried
4853  *
4854  * Returns 0 on success, negative value on failure
4855  */
4856 static enum i40e_status_code
4857 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
4858 {
4859         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
4860         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
4861         struct i40e_hw *hw = &vsi->adapter->hw;
4862         i40e_status ret;
4863         int i;
4864         uint32_t bw_max;
4865
4866         memset(&bw_config, 0, sizeof(bw_config));
4867         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4868         if (ret != I40E_SUCCESS) {
4869                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
4870                             hw->aq.asq_last_status);
4871                 return ret;
4872         }
4873
4874         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
4875         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
4876                                         &ets_sla_config, NULL);
4877         if (ret != I40E_SUCCESS) {
4878                 PMD_DRV_LOG(ERR,
4879                         "VSI failed to get TC bandwidth configuration %u",
4880                         hw->aq.asq_last_status);
4881                 return ret;
4882         }
4883
4884         /* store and print out BW info */
4885         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
4886         vsi->bw_info.bw_max = bw_config.max_bw;
4887         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
4888         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
4889         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
4890                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
4891                      I40E_16_BIT_WIDTH);
4892         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4893                 vsi->bw_info.bw_ets_share_credits[i] =
4894                                 ets_sla_config.share_credits[i];
4895                 vsi->bw_info.bw_ets_credits[i] =
4896                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
4897                 /* 4 bits per TC, 4th bit is reserved */
4898                 vsi->bw_info.bw_ets_max[i] =
4899                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
4900                                   RTE_LEN2MASK(3, uint8_t));
4901                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
4902                             vsi->bw_info.bw_ets_share_credits[i]);
4903                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
4904                             vsi->bw_info.bw_ets_credits[i]);
4905                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
4906                             vsi->bw_info.bw_ets_max[i]);
4907         }
4908
4909         return I40E_SUCCESS;
4910 }
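
/*
 * Worked example of the bw_max unpacking above: the two 16-bit words of
 * tc_bw_max concatenate into one 32-bit value carrying 4 bits per TC
 * (the 4th bit is reserved, hence the 3-bit mask). If
 * bw_max == 0x00325041:
 *
 *     TC0: (0x00325041 >> 0)  & 0x7 = 1
 *     TC1: (0x00325041 >> 4)  & 0x7 = 4
 *     TC3: (0x00325041 >> 12) & 0x7 = 5
 */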
4911
4912 /* i40e_enable_pf_lb
4913  * @pf: pointer to the pf structure
4914  *
4915  * allow loopback on pf
4916  */
4917 static inline void
4918 i40e_enable_pf_lb(struct i40e_pf *pf)
4919 {
4920         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4921         struct i40e_vsi_context ctxt;
4922         int ret;
4923
4924         /* Use the FW API if FW >= v5.0 */
4925         if (hw->aq.fw_maj_ver < 5) {
4926                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
4927                 return;
4928         }
4929
4930         memset(&ctxt, 0, sizeof(ctxt));
4931         ctxt.seid = pf->main_vsi_seid;
4932         ctxt.pf_num = hw->pf_id;
4933         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4934         if (ret) {
4935                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
4936                             ret, hw->aq.asq_last_status);
4937                 return;
4938         }
4939         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4940         ctxt.info.valid_sections =
4941                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4942         ctxt.info.switch_id |=
4943                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4944
4945         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4946         if (ret)
4947                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
4948                             hw->aq.asq_last_status);
4949 }
4950
4951 /* Setup a VSI */
4952 struct i40e_vsi *
4953 i40e_vsi_setup(struct i40e_pf *pf,
4954                enum i40e_vsi_type type,
4955                struct i40e_vsi *uplink_vsi,
4956                uint16_t user_param)
4957 {
4958         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4959         struct i40e_vsi *vsi;
4960         struct i40e_mac_filter_info filter;
4961         int ret;
4962         struct i40e_vsi_context ctxt;
4963         struct ether_addr broadcast =
4964                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
4965
4966         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
4967             uplink_vsi == NULL) {
4968                 PMD_DRV_LOG(ERR,
4969                         "VSI setup failed, uplink VSI shouldn't be NULL");
4970                 return NULL;
4971         }
4972
4973         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
4974                 PMD_DRV_LOG(ERR,
4975                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
4976                 return NULL;
4977         }
4978
4979         /* Two situations:
4980          * 1. type is not MAIN and the uplink VSI is not NULL:
4981          *    if the uplink VSI has no VEB yet, create one under its veb field.
4982          * 2. type is SRIOV and the uplink VSI is NULL:
4983          *    if the floating VEB is NULL, create one under the floating_veb field.
4984          */
4985
4986         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
4987             uplink_vsi->veb == NULL) {
4988                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
4989
4990                 if (uplink_vsi->veb == NULL) {
4991                         PMD_DRV_LOG(ERR, "VEB setup failed");
4992                         return NULL;
4993                 }
4994                 /* Set ALLOW_LOOPBACK on the PF when the VEB is created */
4995                 i40e_enable_pf_lb(pf);
4996         }
4997
4998         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
4999             pf->main_vsi->floating_veb == NULL) {
5000                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5001
5002                 if (pf->main_vsi->floating_veb == NULL) {
5003                         PMD_DRV_LOG(ERR, "VEB setup failed");
5004                         return NULL;
5005                 }
5006         }
5007
5008         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5009         if (!vsi) {
5010                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5011                 return NULL;
5012         }
5013         TAILQ_INIT(&vsi->mac_list);
5014         vsi->type = type;
5015         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5016         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5017         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5018         vsi->user_param = user_param;
5019         vsi->vlan_anti_spoof_on = 0;
5020         vsi->vlan_filter_on = 0;
5021         /* Allocate queues */
5022         switch (vsi->type) {
5023         case I40E_VSI_MAIN  :
5024                 vsi->nb_qps = pf->lan_nb_qps;
5025                 break;
5026         case I40E_VSI_SRIOV :
5027                 vsi->nb_qps = pf->vf_nb_qps;
5028                 break;
5029         case I40E_VSI_VMDQ2:
5030                 vsi->nb_qps = pf->vmdq_nb_qps;
5031                 break;
5032         case I40E_VSI_FDIR:
5033                 vsi->nb_qps = pf->fdir_nb_qps;
5034                 break;
5035         default:
5036                 goto fail_mem;
5037         }
5038         /*
5039          * The filter status descriptor is reported on RX queue 0,
5040          * while the TX queue for FDIR filter programming has no such
5041          * constraint and could be any queue.
5042          * To keep it simple, the FDIR VSI uses queue pair 0. To make
5043          * sure it gets queue pair 0, its queue allocation must be
5044          * done before this function is called.
5045          */
5046         if (type != I40E_VSI_FDIR) {
5047                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5048                 if (ret < 0) {
5049                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5050                                     vsi->seid, ret);
5051                         goto fail_mem;
5052                 }
5053                 vsi->base_queue = ret;
5054         } else
5055                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5056
5057         /* VF has MSIX interrupt in VF range, don't allocate here */
5058         if (type == I40E_VSI_MAIN) {
5059                 ret = i40e_res_pool_alloc(&pf->msix_pool,
5060                                           RTE_MIN(vsi->nb_qps,
5061                                                   RTE_MAX_RXTX_INTR_VEC_ID));
5062                 if (ret < 0) {
5063                         PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
5064                                     vsi->seid, ret);
5065                         goto fail_queue_alloc;
5066                 }
5067                 vsi->msix_intr = ret;
5068                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
5069         } else if (type != I40E_VSI_SRIOV) {
5070                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5071                 if (ret < 0) {
5072                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5073                         goto fail_queue_alloc;
5074                 }
5075                 vsi->msix_intr = ret;
5076                 vsi->nb_msix = 1;
5077         } else {
5078                 vsi->msix_intr = 0;
5079                 vsi->nb_msix = 0;
5080         }
5081
5082         /* Add VSI */
5083         if (type == I40E_VSI_MAIN) {
5084                 /* For main VSI, no need to add since it's the default one */
5085                 vsi->uplink_seid = pf->mac_seid;
5086                 vsi->seid = pf->main_vsi_seid;
5087                 /* Bind queues with specific MSIX interrupt */
5088                 /**
5089                  * At least 2 interrupts are needed: one for the misc
5090                  * cause, which is enabled from the OS side, and another
5091                  * for queue binding, from the device side only.
5092                  */
5093
5094                 /* Get default VSI parameters from hardware */
5095                 memset(&ctxt, 0, sizeof(ctxt));
5096                 ctxt.seid = vsi->seid;
5097                 ctxt.pf_num = hw->pf_id;
5098                 ctxt.uplink_seid = vsi->uplink_seid;
5099                 ctxt.vf_num = 0;
5100                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5101                 if (ret != I40E_SUCCESS) {
5102                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5103                         goto fail_msix_alloc;
5104                 }
5105                 rte_memcpy(&vsi->info, &ctxt.info,
5106                         sizeof(struct i40e_aqc_vsi_properties_data));
5107                 vsi->vsi_id = ctxt.vsi_number;
5108                 vsi->info.valid_sections = 0;
5109
5110                 /* Configure tc, enabled TC0 only */
5111                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5112                         I40E_SUCCESS) {
5113                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5114                         goto fail_msix_alloc;
5115                 }
5116
5117                 /* TC, queue mapping */
5118                 memset(&ctxt, 0, sizeof(ctxt));
5119                 vsi->info.valid_sections |=
5120                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5121                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5122                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5123                 rte_memcpy(&ctxt.info, &vsi->info,
5124                         sizeof(struct i40e_aqc_vsi_properties_data));
5125                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5126                                                 I40E_DEFAULT_TCMAP);
5127                 if (ret != I40E_SUCCESS) {
5128                         PMD_DRV_LOG(ERR,
5129                                 "Failed to configure TC queue mapping");
5130                         goto fail_msix_alloc;
5131                 }
5132                 ctxt.seid = vsi->seid;
5133                 ctxt.pf_num = hw->pf_id;
5134                 ctxt.uplink_seid = vsi->uplink_seid;
5135                 ctxt.vf_num = 0;
5136
5137                 /* Update VSI parameters */
5138                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5139                 if (ret != I40E_SUCCESS) {
5140                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5141                         goto fail_msix_alloc;
5142                 }
5143
5144                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5145                                                 sizeof(vsi->info.tc_mapping));
5146                 rte_memcpy(&vsi->info.queue_mapping,
5147                                 &ctxt.info.queue_mapping,
5148                         sizeof(vsi->info.queue_mapping));
5149                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5150                 vsi->info.valid_sections = 0;
5151
5152                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5153                                 ETH_ADDR_LEN);
5154
5155                 /**
5156                  * Updating the default filter settings is necessary to
5157                  * prevent reception of tagged packets.
5158                  * Some old firmware configurations load a default macvlan
5159                  * filter which accepts both tagged and untagged packets.
5160                  * The update replaces it with a normal filter if needed.
5161                  * For NVM 4.2.2 or later, the update is no longer needed.
5162                  * Firmware with a correct configuration loads the default
5163                  * macvlan filter, which is expected and cannot be removed.
5164                  */
5165                 i40e_update_default_filter_setting(vsi);
5166                 i40e_config_qinq(hw, vsi);
5167         } else if (type == I40E_VSI_SRIOV) {
5168                 memset(&ctxt, 0, sizeof(ctxt));
5169                 /**
5170                  * For other VSIs, the uplink_seid equals the uplink VSI's
5171                  * uplink_seid since they share the same VEB.
5172                  */
5173                 if (uplink_vsi == NULL)
5174                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5175                 else
5176                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5177                 ctxt.pf_num = hw->pf_id;
5178                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5179                 ctxt.uplink_seid = vsi->uplink_seid;
5180                 ctxt.connection_type = 0x1;
5181                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5182
5183                 /* Use the VEB configuration if FW >= v5.0 */
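                     /* ALLOW_LB permits local loopback on the VEB so that
                      * VF-to-VF traffic can be switched inside the device.
                      */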
5184                 if (hw->aq.fw_maj_ver >= 5) {
5185                         /* Configure switch ID */
5186                         ctxt.info.valid_sections |=
5187                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5188                         ctxt.info.switch_id =
5189                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5190                 }
5191
5192                 /* Configure port/vlan */
5193                 ctxt.info.valid_sections |=
5194                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5195                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5196                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5197                                                 hw->func_caps.enabled_tcmap);
5198                 if (ret != I40E_SUCCESS) {
5199                         PMD_DRV_LOG(ERR,
5200                                 "Failed to configure TC queue mapping");
5201                         goto fail_msix_alloc;
5202                 }
5203
5204                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5205                 ctxt.info.valid_sections |=
5206                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5207                 /**
5208                  * Since the VSI is not created yet, only configure its
5209                  * parameters here; the VSI will be added below.
5210                  */
5211
5212                 i40e_config_qinq(hw, vsi);
5213         } else if (type == I40E_VSI_VMDQ2) {
5214                 memset(&ctxt, 0, sizeof(ctxt));
5215                 /*
5216                  * For other VSIs, the uplink_seid equals the uplink VSI's
5217                  * uplink_seid since they share the same VEB.
5218                  */
5219                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5220                 ctxt.pf_num = hw->pf_id;
5221                 ctxt.vf_num = 0;
5222                 ctxt.uplink_seid = vsi->uplink_seid;
5223                 ctxt.connection_type = 0x1;
5224                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5225
5226                 ctxt.info.valid_sections |=
5227                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5228                 /* user_param carries a flag to enable loopback */
5229                 if (user_param) {
5230                         ctxt.info.switch_id =
5231                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5232                         ctxt.info.switch_id |=
5233                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5234                 }
5235
5236                 /* Configure port/vlan */
5237                 ctxt.info.valid_sections |=
5238                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5239                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5240                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5241                                                 I40E_DEFAULT_TCMAP);
5242                 if (ret != I40E_SUCCESS) {
5243                         PMD_DRV_LOG(ERR,
5244                                 "Failed to configure TC queue mapping");
5245                         goto fail_msix_alloc;
5246                 }
5247                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5248                 ctxt.info.valid_sections |=
5249                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5250         } else if (type == I40E_VSI_FDIR) {
5251                 memset(&ctxt, 0, sizeof(ctxt));
5252                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5253                 ctxt.pf_num = hw->pf_id;
5254                 ctxt.vf_num = 0;
5255                 ctxt.uplink_seid = vsi->uplink_seid;
5256                 ctxt.connection_type = 0x1;     /* regular data port */
5257                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5258                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5259                                                 I40E_DEFAULT_TCMAP);
5260                 if (ret != I40E_SUCCESS) {
5261                         PMD_DRV_LOG(ERR,
5262                                 "Failed to configure TC queue mapping.");
5263                         goto fail_msix_alloc;
5264                 }
5265                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5266                 ctxt.info.valid_sections |=
5267                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5268         } else {
5269                 PMD_DRV_LOG(ERR, "VSI: other VSI types not supported yet");
5270                 goto fail_msix_alloc;
5271         }
5272
5273         if (vsi->type != I40E_VSI_MAIN) {
5274                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5275                 if (ret != I40E_SUCCESS) {
5276                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5277                                     hw->aq.asq_last_status);
5278                         goto fail_msix_alloc;
5279                 }
5280                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5281                 vsi->info.valid_sections = 0;
5282                 vsi->seid = ctxt.seid;
5283                 vsi->vsi_id = ctxt.vsi_number;
5284                 vsi->sib_vsi_list.vsi = vsi;
5285                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5286                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5287                                           &vsi->sib_vsi_list, list);
5288                 } else {
5289                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5290                                           &vsi->sib_vsi_list, list);
5291                 }
5292         }
5293
5294         /* MAC/VLAN configuration */
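             /* Add a perfect-match filter for the broadcast address so the
              * VSI accepts broadcast frames.
              */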
5295         rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5296         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5297
5298         ret = i40e_vsi_add_mac(vsi, &filter);
5299         if (ret != I40E_SUCCESS) {
5300                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5301                 goto fail_msix_alloc;
5302         }
5303
5304         /* Get VSI BW information */
5305         i40e_vsi_get_bw_config(vsi);
5306         return vsi;
5307 fail_msix_alloc:
5308         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5309 fail_queue_alloc:
5310         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5311 fail_mem:
5312         rte_free(vsi);
5313         return NULL;
5314 }
5315
5316 /* Configure vlan filter on or off */
5317 int
5318 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5319 {
5320         int i, num;
5321         struct i40e_mac_filter *f;
5322         void *temp;
5323         struct i40e_mac_filter_info *mac_filter;
5324         enum rte_mac_filter_type desired_filter;
5325         int ret = I40E_SUCCESS;
5326
5327         if (on) {
5328                 /* Filter to match MAC and VLAN */
5329                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5330         } else {
5331                 /* Filter to match only MAC */
5332                 desired_filter = RTE_MAC_PERFECT_MATCH;
5333         }
5334
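             /*
              * The filter type of an installed entry cannot be changed in
              * place: save the current MAC filters, remove them all, then
              * re-add them below with the desired filter type.
              */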
5335         num = vsi->mac_num;
5336
5337         mac_filter = rte_zmalloc("mac_filter_info_data",
5338                                  num * sizeof(*mac_filter), 0);
5339         if (mac_filter == NULL) {
5340                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5341                 return I40E_ERR_NO_MEMORY;
5342         }
5343
5344         i = 0;
5345
5346         /* Remove all existing MAC filters */
5347         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5348                 mac_filter[i] = f->mac_info;
5349                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5350                 if (ret) {
5351                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5352                                     on ? "enable" : "disable");
5353                         goto DONE;
5354                 }
5355                 i++;
5356         }
5357
5358         /* Re-add the saved filters with the new filter type */
5359         for (i = 0; i < num; i++) {
5360                 mac_filter[i].filter_type = desired_filter;
5361                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5362                 if (ret) {
5363                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5364                                     on ? "enable" : "disable");
5365                         goto DONE;
5366                 }
5367         }
5368
5369 DONE:
5370         rte_free(mac_filter);
5371         return ret;
5372 }
5373
5374 /* Configure vlan stripping on or off */
5375 int
5376 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5377 {
5378         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5379         struct i40e_vsi_context ctxt;
5380         uint8_t vlan_flags;
5381         int ret = I40E_SUCCESS;
5382
5383         /* Check if it is already on or off */
5384         if (vsi->info.valid_sections &
5385                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5386                 if (on) {
5387                         if ((vsi->info.port_vlan_flags &
5388                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5389                                 return 0; /* already on */
5390                 } else {
5391                         if ((vsi->info.port_vlan_flags &
5392                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5393                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
5394                                 return 0; /* already off */
5395                 }
5396         }
5397
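             /*
              * EMOD_STR_BOTH strips the VLAN tag on receive and reports it
              * in the RX descriptor; EMOD_NOTHING leaves the tag in the
              * packet untouched.
              */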
5398         if (on)
5399                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5400         else
5401                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5402         vsi->info.valid_sections =
5403                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5404         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5405         vsi->info.port_vlan_flags |= vlan_flags;
5406         ctxt.seid = vsi->seid;
5407         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5408         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5409         if (ret)
5410                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
5411                             on ? "enable" : "disable");
5412
5413         return ret;
5414 }
5415
5416 static int
5417 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5418 {
5419         struct rte_eth_dev_data *data = dev->data;
5420         int ret;
5421         int mask = 0;
5422
5423         /* Apply vlan offload setting */
5424         mask = ETH_VLAN_STRIP_MASK |
5425                ETH_VLAN_FILTER_MASK |
5426                ETH_VLAN_EXTEND_MASK;
5427         ret = i40e_vlan_offload_set(dev, mask);
5428         if (ret) {
5429                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
5430                 return ret;
5431         }
5432
5433         /* Apply pvid setting */
5434         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5435                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
5436         if (ret)
5437                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
5438
5439         return ret;
5440 }
5441
5442 static int
5443 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5444 {
5445         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5446
5447         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5448 }
5449
5450 static int
5451 i40e_update_flow_control(struct i40e_hw *hw)
5452 {
5453 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5454         struct i40e_link_status link_status;
5455         uint32_t rxfc = 0, txfc = 0, reg;
5456         uint8_t an_info;
5457         int ret;
5458
5459         memset(&link_status, 0, sizeof(link_status));
5460         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5461         if (ret != I40E_SUCCESS) {
5462                 PMD_DRV_LOG(ERR, "Failed to get link status information");
5463                 goto write_reg; /* Disable flow control */
5464         }
5465
5466         an_info = hw->phy.link_info.an_info;
5467         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5468                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5469                 ret = I40E_ERR_NOT_READY;
5470                 goto write_reg; /* Disable flow control */
5471         }
5472         /**
5473          * If link auto-negotiation is enabled, flow control needs to
5474          * be configured according to it.
5475          */
5476         switch (an_info & I40E_LINK_PAUSE_RXTX) {
5477         case I40E_LINK_PAUSE_RXTX:
5478                 rxfc = 1;
5479                 txfc = 1;
5480                 hw->fc.current_mode = I40E_FC_FULL;
5481                 break;
5482         case I40E_AQ_LINK_PAUSE_RX:
5483                 rxfc = 1;
5484                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
5485                 break;
5486         case I40E_AQ_LINK_PAUSE_TX:
5487                 txfc = 1;
5488                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
5489                 break;
5490         default:
5491                 hw->fc.current_mode = I40E_FC_NONE;
5492                 break;
5493         }
5494
5495 write_reg:
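             /*
              * TFCE in PRTDCB_FCCFG enables transmitting pause frames; RFCE
              * in PRTDCB_MFLCN enables honoring received pause frames. On
              * the error paths above, rxfc and txfc stay 0, which disables
              * flow control.
              */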
5496         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
5497                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
5498         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
5499         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
5500         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
5501         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
5502
5503         return ret;
5504 }
5505
5506 /* PF setup */
5507 static int
5508 i40e_pf_setup(struct i40e_pf *pf)
5509 {
5510         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5511         struct i40e_filter_control_settings settings;
5512         struct i40e_vsi *vsi;
5513         int ret;
5514
5515         /* Clear all stats counters */
5516         pf->offset_loaded = FALSE;
5517         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
5518         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
5519         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
5520         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
5521
5522         ret = i40e_pf_get_switch_config(pf);
5523         if (ret != I40E_SUCCESS) {
5524                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
5525                 return ret;
5526         }
5527         if (pf->flags & I40E_FLAG_FDIR) {
5528                 /* Allocate the queue first so that FDIR uses queue pair 0 */
5529                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
5530                 if (ret != I40E_FDIR_QUEUE_ID) {
5531                         PMD_DRV_LOG(ERR,
5532                                 "queue allocation failed for FDIR: ret=%d",
5533                                 ret);
5534                         pf->flags &= ~I40E_FLAG_FDIR;
5535                 }
5536         }
5537         /* Main VSI setup */
5538         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
5539         if (!vsi) {
5540                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
5541                 return I40E_ERR_NOT_READY;
5542         }
5543         pf->main_vsi = vsi;
5544
5545         /* Configure filter control */
5546         memset(&settings, 0, sizeof(settings));
5547         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
5548                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
5549         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
5550                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
5551         else {
5552                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
5553                         hw->func_caps.rss_table_size);
5554                 return I40E_ERR_PARAM;
5555         }
5556         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
5557                 hw->func_caps.rss_table_size);
5558         pf->hash_lut_size = hw->func_caps.rss_table_size;
5559
5560         /* Enable ethtype and macvlan filters */
5561         settings.enable_ethtype = TRUE;
5562         settings.enable_macvlan = TRUE;
5563         ret = i40e_set_filter_control(hw, &settings);
5564         if (ret)
5565                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
5566                                                                 ret);
5567
5568         /* Update flow control according to the auto negotiation */
5569         i40e_update_flow_control(hw);
5570
5571         return I40E_SUCCESS;
5572 }
5573
5574 int
5575 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5576 {
5577         uint32_t reg;
5578         uint16_t j;
5579
5580         /**
5581          * Set or clear TX Queue Disable flags,
5582          * as required by hardware.
5583          */
5584         i40e_pre_tx_queue_cfg(hw, q_idx, on);
5585         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
5586
5587         /* Wait until the request is finished */
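             /*
              * QENA_REQ holds the last software request and QENA_STAT the
              * current hardware state; once the two bits match, the
              * previous enable/disable request has completed.
              */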
5588         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5589                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5590                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5591                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5592                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
5593                                                         & 0x1))) {
5594                         break;
5595                 }
5596         }
5597         if (on) {
5598                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
5599                         return I40E_SUCCESS; /* already on, skip next steps */
5600
5601                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
5602                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
5603         } else {
5604                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5605                         return I40E_SUCCESS; /* already off, skip next steps */
5606                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
5607         }
5608         /* Write the register */
5609         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
5610         /* Check the result */
5611         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5612                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5613                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5614                 if (on) {
5615                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5616                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
5617                                 break;
5618                 } else {
5619                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5620                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5621                                 break;
5622                 }
5623         }
5624         /* Check if it is timeout */
5625         if (j >= I40E_CHK_Q_ENA_COUNT) {
5626                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5627                             (on ? "enable" : "disable"), q_idx);
5628                 return I40E_ERR_TIMEOUT;
5629         }
5630
5631         return I40E_SUCCESS;
5632 }
5633
5634 /* Switch on or off the tx queues */
5635 static int
5636 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5637 {
5638         struct rte_eth_dev_data *dev_data = pf->dev_data;
5639         struct i40e_tx_queue *txq;
5640         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5641         uint16_t i;
5642         int ret;
5643
5644         for (i = 0; i < dev_data->nb_tx_queues; i++) {
5645                 txq = dev_data->tx_queues[i];
5646                 /* Don't operate the queue if not configured, or if it
5647                  * should only be started per queue (deferred start) */
5648                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5649                         continue;
5650                 if (on)
5651                         ret = i40e_dev_tx_queue_start(dev, i);
5652                 else
5653                         ret = i40e_dev_tx_queue_stop(dev, i);
5654                 if (ret != I40E_SUCCESS)
5655                         return ret;
5656         }
5657
5658         return I40E_SUCCESS;
5659 }
5660
5661 int
5662 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5663 {
5664         uint32_t reg;
5665         uint16_t j;
5666
5667         /* Wait until the request is finished */
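             /* Same QENA_REQ/QENA_STAT handshake as for the TX queues. */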
5668         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5669                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5670                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5671                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5672                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
5673                         break;
5674         }
5675
5676         if (on) {
5677                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5678                         return I40E_SUCCESS; /* Already on, skip next steps */
5679                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5680         } else {
5681                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5682                         return I40E_SUCCESS; /* Already off, skip next steps */
5683                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5684         }
5685
5686         /* Write the register */
5687         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5688         /* Check the result */
5689         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5690                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5691                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5692                 if (on) {
5693                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5694                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
5695                                 break;
5696                 } else {
5697                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5698                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5699                                 break;
5700                 }
5701         }
5702
5703         /* Check if it is timeout */
5704         if (j >= I40E_CHK_Q_ENA_COUNT) {
5705                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
5706                             (on ? "enable" : "disable"), q_idx);
5707                 return I40E_ERR_TIMEOUT;
5708         }
5709
5710         return I40E_SUCCESS;
5711 }
5712 /* Switch on or off the rx queues */
5713 static int
5714 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
5715 {
5716         struct rte_eth_dev_data *dev_data = pf->dev_data;
5717         struct i40e_rx_queue *rxq;
5718         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5719         uint16_t i;
5720         int ret;
5721
5722         for (i = 0; i < dev_data->nb_rx_queues; i++) {
5723                 rxq = dev_data->rx_queues[i];
5724                 /* Don't operate the queue if not configured, or if it
5725                  * should only be started per queue (deferred start) */
5726                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
5727                         continue;
5728                 if (on)
5729                         ret = i40e_dev_rx_queue_start(dev, i);
5730                 else
5731                         ret = i40e_dev_rx_queue_stop(dev, i);
5732                 if (ret != I40E_SUCCESS)
5733                         return ret;
5734         }
5735
5736         return I40E_SUCCESS;
5737 }
5738
5739 /* Switch on or off all the rx/tx queues */
5740 int
5741 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
5742 {
5743         int ret;
5744
5745         if (on) {
5746                 /* enable rx queues before enabling tx queues */
5747                 ret = i40e_dev_switch_rx_queues(pf, on);
5748                 if (ret) {
5749                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
5750                         return ret;
5751                 }
5752                 ret = i40e_dev_switch_tx_queues(pf, on);
5753         } else {
5754                 /* Stop tx queues before stopping rx queues */
5755                 ret = i40e_dev_switch_tx_queues(pf, on);
5756                 if (ret) {
5757                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
5758                         return ret;
5759                 }
5760                 ret = i40e_dev_switch_rx_queues(pf, on);
5761         }
5762
5763         return ret;
5764 }
5765
5766 /* Initialize VSI for TX */
5767 static int
5768 i40e_dev_tx_init(struct i40e_pf *pf)
5769 {
5770         struct rte_eth_dev_data *data = pf->dev_data;
5771         uint16_t i;
5772         uint32_t ret = I40E_SUCCESS;
5773         struct i40e_tx_queue *txq;
5774
5775         for (i = 0; i < data->nb_tx_queues; i++) {
5776                 txq = data->tx_queues[i];
5777                 if (!txq || !txq->q_set)
5778                         continue;
5779                 ret = i40e_tx_queue_init(txq);
5780                 if (ret != I40E_SUCCESS)
5781                         break;
5782         }
5783         if (ret == I40E_SUCCESS)
5784                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
5785                                      ->eth_dev);
5786
5787         return ret;
5788 }
5789
5790 /* Initialize VSI for RX */
5791 static int
5792 i40e_dev_rx_init(struct i40e_pf *pf)
5793 {
5794         struct rte_eth_dev_data *data = pf->dev_data;
5795         int ret = I40E_SUCCESS;
5796         uint16_t i;
5797         struct i40e_rx_queue *rxq;
5798
5799         i40e_pf_config_mq_rx(pf);
5800         for (i = 0; i < data->nb_rx_queues; i++) {
5801                 rxq = data->rx_queues[i];
5802                 if (!rxq || !rxq->q_set)
5803                         continue;
5804
5805                 ret = i40e_rx_queue_init(rxq);
5806                 if (ret != I40E_SUCCESS) {
5807                         PMD_DRV_LOG(ERR,
5808                                 "Failed to do RX queue initialization");
5809                         break;
5810                 }
5811         }
5812         if (ret == I40E_SUCCESS)
5813                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
5814                                      ->eth_dev);
5815
5816         return ret;
5817 }
5818
5819 static int
5820 i40e_dev_rxtx_init(struct i40e_pf *pf)
5821 {
5822         int err;
5823
5824         err = i40e_dev_tx_init(pf);
5825         if (err) {
5826                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
5827                 return err;
5828         }
5829         err = i40e_dev_rx_init(pf);
5830         if (err) {
5831                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
5832                 return err;
5833         }
5834
5835         return err;
5836 }
5837
5838 static int
5839 i40e_vmdq_setup(struct rte_eth_dev *dev)
5840 {
5841         struct rte_eth_conf *conf = &dev->data->dev_conf;
5842         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5843         int i, err, conf_vsis, j, loop;
5844         struct i40e_vsi *vsi;
5845         struct i40e_vmdq_info *vmdq_info;
5846         struct rte_eth_vmdq_rx_conf *vmdq_conf;
5847         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5848
5849         /*
5850          * Disable interrupts to avoid messages from VFs. This also avoids
5851          * race conditions in VSI creation/destruction.
5852          */
5853         i40e_pf_disable_irq0(hw);
5854
5855         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
5856                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
5857                 return -ENOTSUP;
5858         }
5859
5860         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
5861         if (conf_vsis > pf->max_nb_vmdq_vsi) {
5862                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
5863                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
5864                         pf->max_nb_vmdq_vsi);
5865                 return -ENOTSUP;
5866         }
5867
5868         if (pf->vmdq != NULL) {
5869                 PMD_INIT_LOG(INFO, "VMDQ already configured");
5870                 return 0;
5871         }
5872
5873         pf->vmdq = rte_zmalloc("vmdq_info_struct",
5874                                 sizeof(*vmdq_info) * conf_vsis, 0);
5875
5876         if (pf->vmdq == NULL) {
5877                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
5878                 return -ENOMEM;
5879         }
5880
5881         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
5882
5883         /* Create VMDQ VSI */
5884         for (i = 0; i < conf_vsis; i++) {
5885                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
5886                                 vmdq_conf->enable_loop_back);
5887                 if (vsi == NULL) {
5888                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
5889                         err = -1;
5890                         goto err_vsi_setup;
5891                 }
5892                 vmdq_info = &pf->vmdq[i];
5893                 vmdq_info->pf = pf;
5894                 vmdq_info->vsi = vsi;
5895         }
5896         pf->nb_cfg_vmdq_vsi = conf_vsis;
5897
5898         /* Configure VLANs */
5899         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
5900         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
5901                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
5902                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
5903                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
5904                                         vmdq_conf->pool_map[i].vlan_id, j);
5905
5906                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
5907                                                 vmdq_conf->pool_map[i].vlan_id);
5908                                 if (err) {
5909                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
5910                                         err = -1;
5911                                         goto err_vsi_setup;
5912                                 }
5913                         }
5914                 }
5915         }
5916
5917         i40e_pf_enable_irq0(hw);
5918
5919         return 0;
5920
5921 err_vsi_setup:
5922         for (i = 0; i < conf_vsis; i++) {
5923                 if (pf->vmdq[i].vsi == NULL)
5924                         break;
5925                 i40e_vsi_release(pf->vmdq[i].vsi);
5926         }
5927
5928         rte_free(pf->vmdq);
5929         pf->vmdq = NULL;
5930         i40e_pf_enable_irq0(hw);
5931         return err;
5932 }
5933
5934 static void
5935 i40e_stat_update_32(struct i40e_hw *hw,
5936                    uint32_t reg,
5937                    bool offset_loaded,
5938                    uint64_t *offset,
5939                    uint64_t *stat)
5940 {
5941         uint64_t new_data;
5942
5943         new_data = (uint64_t)I40E_READ_REG(hw, reg);
5944         if (!offset_loaded)
5945                 *offset = new_data;
5946
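             /*
              * The hardware counter is 32 bits wide and may wrap; if the
              * new value is below the saved offset, add 2^32 before
              * subtracting to get the delta since the offset was captured.
              */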
5947         if (new_data >= *offset)
5948                 *stat = (uint64_t)(new_data - *offset);
5949         else
5950                 *stat = (uint64_t)((new_data +
5951                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
5952 }
5953
5954 static void
5955 i40e_stat_update_48(struct i40e_hw *hw,
5956                    uint32_t hireg,
5957                    uint32_t loreg,
5958                    bool offset_loaded,
5959                    uint64_t *offset,
5960                    uint64_t *stat)
5961 {
5962         uint64_t new_data;
5963
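             /*
              * A 48-bit counter is split across two registers: the low
              * register holds the lower 32 bits and the high register the
              * upper 16 bits.
              */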
5964         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
5965         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
5966                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
5967
5968         if (!offset_loaded)
5969                 *offset = new_data;
5970
5971         if (new_data >= *offset)
5972                 *stat = new_data - *offset;
5973         else
5974                 *stat = (uint64_t)((new_data +
5975                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
5976
5977         *stat &= I40E_48_BIT_MASK;
5978 }
5979
5980 /* Disable IRQ0 */
5981 void
5982 i40e_pf_disable_irq0(struct i40e_hw *hw)
5983 {
5984         /* Disable all interrupt types */
5985         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
5986         I40E_WRITE_FLUSH(hw);
5987 }
5988
5989 /* Enable IRQ0 */
5990 void
5991 i40e_pf_enable_irq0(struct i40e_hw *hw)
5992 {
5993         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
5994                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
5995                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
5996                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
5997         I40E_WRITE_FLUSH(hw);
5998 }
5999
6000 static void
6001 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6002 {
6003         /* read pending request and disable first */
6004         i40e_pf_disable_irq0(hw);
6005         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6006         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6007                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6008
6009         if (no_queue)
6010                 /* Do not link any queues with irq0 */
6011                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6012                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6013 }
6014
6015 static void
6016 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6017 {
6018         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6019         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6020         int i;
6021         uint16_t abs_vf_id;
6022         uint32_t index, offset, val;
6023
6024         if (!pf->vfs)
6025                 return;
6026         /**
6027          * Try to find which VF triggered a reset; use the absolute VF id
6028          * since the register is a global register.
6029          */
6030         for (i = 0; i < pf->vf_num; i++) {
6031                 abs_vf_id = hw->func_caps.vf_base_id + i;
6032                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6033                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
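                     /*
                      * VFLRSTAT is an array of 32-bit registers with one bit
                      * per VF: 'index' selects the register and 'offset' the
                      * bit within it.
                      */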
6034                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6035                 /* VFR event occurred */
6036                 if (val & (0x1 << offset)) {
6037                         int ret;
6038
6039                         /* Clear the event first */
6040                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6041                                                         (0x1 << offset));
6042                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6043                         /**
6044                          * Only notify that a VF reset event occurred;
6045                          * don't trigger another SW reset.
6046                          */
6047                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6048                         if (ret != I40E_SUCCESS)
6049                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6050                 }
6051         }
6052 }
6053
6054 static void
6055 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6056 {
6057         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6058         int i;
6059
6060         for (i = 0; i < pf->vf_num; i++)
6061                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6062 }
6063
6064 static void
6065 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6066 {
6067         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6068         struct i40e_arq_event_info info;
6069         uint16_t pending, opcode;
6070         int ret;
6071
6072         info.buf_len = I40E_AQ_BUF_SZ;
6073         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6074         if (!info.msg_buf) {
6075                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6076                 return;
6077         }
6078
6079         pending = 1;
6080         while (pending) {
6081                 ret = i40e_clean_arq_element(hw, &info, &pending);
6082
6083                 if (ret != I40E_SUCCESS) {
6084                         PMD_DRV_LOG(INFO,
6085                                 "Failed to read msg from AdminQ, aq_err: %u",
6086                                 hw->aq.asq_last_status);
6087                         break;
6088                 }
6089                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6090
6091                 switch (opcode) {
6092                 case i40e_aqc_opc_send_msg_to_pf:
6093                         /* Refer to i40e_aq_send_msg_to_pf() for the argument layout */
6094                         i40e_pf_host_handle_vf_msg(dev,
6095                                         rte_le_to_cpu_16(info.desc.retval),
6096                                         rte_le_to_cpu_32(info.desc.cookie_high),
6097                                         rte_le_to_cpu_32(info.desc.cookie_low),
6098                                         info.msg_buf,
6099                                         info.msg_len);
6100                         break;
6101                 case i40e_aqc_opc_get_link_status:
6102                         ret = i40e_dev_link_update(dev, 0);
6103                         if (!ret)
6104                                 _rte_eth_dev_callback_process(dev,
6105                                         RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
6106                         break;
6107                 default:
6108                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6109                                     opcode);
6110                         break;
6111                 }
6112         }
6113         rte_free(info.msg_buf);
6114 }
6115
6116 /**
6117  * Interrupt handler triggered by the NIC for handling
6118  * a specific interrupt.
6119  *
6120  * @param handle
6121  *  Pointer to interrupt handle.
6122  * @param param
6123  *  The address of the parameter (struct rte_eth_dev *) registered before.
6124  *
6125  * @return
6126  *  void
6127  */
6128 static void
6129 i40e_dev_interrupt_handler(void *param)
6130 {
6131         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6132         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6133         uint32_t icr0;
6134
6135         /* Disable interrupt */
6136         i40e_pf_disable_irq0(hw);
6137
6138         /* read out interrupt causes */
6139         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6140
6141         /* No interrupt event indicated */
6142         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6143                 PMD_DRV_LOG(INFO, "No interrupt event");
6144                 goto done;
6145         }
6146         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6147                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6148         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6149                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6150         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6151                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6152         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6153                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6154         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6155                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6156         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6157                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6158         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6159                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6160
6161         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6162                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6163                 i40e_dev_handle_vfr_event(dev);
6164         }
6165         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6166                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6167                 i40e_dev_handle_aq_msg(dev);
6168         }
6169
6170 done:
6171         /* Enable interrupt */
6172         i40e_pf_enable_irq0(hw);
6173         rte_intr_enable(dev->intr_handle);
6174 }
6175
6176 int
6177 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6178                          struct i40e_macvlan_filter *filter,
6179                          int total)
6180 {
6181         int ele_num, ele_buff_size;
6182         int num, actual_num, i;
6183         uint16_t flags;
6184         int ret = I40E_SUCCESS;
6185         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6186         struct i40e_aqc_add_macvlan_element_data *req_list;
6187
6188         if (filter == NULL || total == 0)
6189                 return I40E_ERR_PARAM;
6190         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6191         ele_buff_size = hw->aq.asq_buf_size;
6192
6193         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6194         if (req_list == NULL) {
6195                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6196                 return I40E_ERR_NO_MEMORY;
6197         }
6198
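             /*
              * The admin queue buffer holds at most ele_num elements, so
              * split the request into chunks and issue one admin queue
              * command per chunk.
              */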
6199         num = 0;
6200         do {
6201                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6202                 memset(req_list, 0, ele_buff_size);
6203
6204                 for (i = 0; i < actual_num; i++) {
6205                         rte_memcpy(req_list[i].mac_addr,
6206                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6207                         req_list[i].vlan_tag =
6208                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6209
6210                         switch (filter[num + i].filter_type) {
6211                         case RTE_MAC_PERFECT_MATCH:
6212                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6213                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6214                                 break;
6215                         case RTE_MACVLAN_PERFECT_MATCH:
6216                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6217                                 break;
6218                         case RTE_MAC_HASH_MATCH:
6219                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6220                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6221                                 break;
6222                         case RTE_MACVLAN_HASH_MATCH:
6223                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6224                                 break;
6225                         default:
6226                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
6227                                 ret = I40E_ERR_PARAM;
6228                                 goto DONE;
6229                         }
6230
6231                         req_list[i].queue_number = 0;
6232
6233                         req_list[i].flags = rte_cpu_to_le_16(flags);
6234                 }
6235
6236                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6237                                                 actual_num, NULL);
6238                 if (ret != I40E_SUCCESS) {
6239                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6240                         goto DONE;
6241                 }
6242                 num += actual_num;
6243         } while (num < total);
6244
6245 DONE:
6246         rte_free(req_list);
6247         return ret;
6248 }
6249
6250 int
6251 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6252                             struct i40e_macvlan_filter *filter,
6253                             int total)
6254 {
6255         int ele_num, ele_buff_size;
6256         int num, actual_num, i;
6257         uint16_t flags;
6258         int ret = I40E_SUCCESS;
6259         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6260         struct i40e_aqc_remove_macvlan_element_data *req_list;
6261
6262         if (filter == NULL || total == 0)
6263                 return I40E_ERR_PARAM;
6264
6265         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6266         ele_buff_size = hw->aq.asq_buf_size;
6267
6268         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6269         if (req_list == NULL) {
6270                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6271                 return I40E_ERR_NO_MEMORY;
6272         }
6273
6274         num = 0;
6275         do {
6276                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6277                 memset(req_list, 0, ele_buff_size);
6278
6279                 for (i = 0; i < actual_num; i++) {
6280                         rte_memcpy(req_list[i].mac_addr,
6281                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6282                         req_list[i].vlan_tag =
6283                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6284
6285                         switch (filter[num + i].filter_type) {
6286                         case RTE_MAC_PERFECT_MATCH:
6287                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6288                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6289                                 break;
6290                         case RTE_MACVLAN_PERFECT_MATCH:
6291                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6292                                 break;
6293                         case RTE_MAC_HASH_MATCH:
6294                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6295                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6296                                 break;
6297                         case RTE_MACVLAN_HASH_MATCH:
6298                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6299                                 break;
6300                         default:
6301                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6302                                 ret = I40E_ERR_PARAM;
6303                                 goto DONE;
6304                         }
6305                         req_list[i].flags = rte_cpu_to_le_16(flags);
6306                 }
6307
6308                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6309                                                 actual_num, NULL);
6310                 if (ret != I40E_SUCCESS) {
6311                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6312                         goto DONE;
6313                 }
6314                 num += actual_num;
6315         } while (num < total);
6316
6317 DONE:
6318         rte_free(req_list);
6319         return ret;
6320 }
6321
6322 /* Find out specific MAC filter */
6323 static struct i40e_mac_filter *
6324 i40e_find_mac_filter(struct i40e_vsi *vsi,
6325                          struct ether_addr *macaddr)
6326 {
6327         struct i40e_mac_filter *f;
6328
6329         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6330                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6331                         return f;
6332         }
6333
6334         return NULL;
6335 }
6336
6337 static bool
6338 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6339                          uint16_t vlan_id)
6340 {
6341         uint32_t vid_idx, vid_bit;
6342
6343         if (vlan_id > ETH_VLAN_ID_MAX)
6344                 return 0;
6345
6346         vid_idx = I40E_VFTA_IDX(vlan_id);
6347         vid_bit = I40E_VFTA_BIT(vlan_id);
6348
6349         if (vsi->vfta[vid_idx] & vid_bit)
6350                 return 1;
6351         else
6352                 return 0;
6353 }
6354
6355 static void
6356 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6357                        uint16_t vlan_id, bool on)
6358 {
6359         uint32_t vid_idx, vid_bit;
6360
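             /*
              * The VFTA is a bit array with one bit per VLAN id: vid_idx
              * selects the 32-bit word and vid_bit the bit within that
              * word.
              */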
6361         vid_idx = I40E_VFTA_IDX(vlan_id);
6362         vid_bit = I40E_VFTA_BIT(vlan_id);
6363
6364         if (on)
6365                 vsi->vfta[vid_idx] |= vid_bit;
6366         else
6367                 vsi->vfta[vid_idx] &= ~vid_bit;
6368 }
6369
6370 void
6371 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6372                      uint16_t vlan_id, bool on)
6373 {
6374         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6375         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6376         int ret;
6377
6378         if (vlan_id > ETH_VLAN_ID_MAX)
6379                 return;
6380
6381         i40e_store_vlan_filter(vsi, vlan_id, on);
6382
6383         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6384                 return;
6385
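             /*
              * Beyond this point the filter is also programmed in hardware
              * through the admin queue; otherwise only the software VFTA
              * shadow above is updated.
              */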
6386         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6387
6388         if (on) {
6389                 ret = i40e_aq_add_vlan(hw, vsi->seid,
6390                                        &vlan_data, 1, NULL);
6391                 if (ret != I40E_SUCCESS)
6392                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6393         } else {
6394                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
6395                                           &vlan_data, 1, NULL);
6396                 if (ret != I40E_SUCCESS)
6397                         PMD_DRV_LOG(ERR,
6398                                     "Failed to remove vlan filter");
6399         }
6400 }
6401
6402 /**
6403  * Find all vlan options for specific mac addr,
6404  * return with actual vlan found.
6405  */
6406 int
6407 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6408                            struct i40e_macvlan_filter *mv_f,
6409                            int num, struct ether_addr *addr)
6410 {
6411         int i;
6412         uint32_t j, k;
6413
6414         /**
6415          * Do not use i40e_find_vlan_filter here, to reduce loop time,
6416          * although the code looks more complex.
6417          */
6418         if (num < vsi->vlan_num)
6419                 return I40E_ERR_PARAM;
6420
6421         i = 0;
6422         for (j = 0; j < I40E_VFTA_SIZE; j++) {
6423                 if (vsi->vfta[j]) {
6424                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6425                                 if (vsi->vfta[j] & (1 << k)) {
6426                                         if (i > num - 1) {
6427                                                 PMD_DRV_LOG(ERR,
6428                                                         "vlan number doesn't match");
6429                                                 return I40E_ERR_PARAM;
6430                                         }
6431                                         rte_memcpy(&mv_f[i].macaddr,
6432                                                         addr, ETH_ADDR_LEN);
6433                                         mv_f[i].vlan_id =
6434                                                 j * I40E_UINT32_BIT_SIZE + k;
6435                                         i++;
6436                                 }
6437                         }
6438                 }
6439         }
6440         return I40E_SUCCESS;
6441 }
6442
6443 static inline int
6444 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
6445                            struct i40e_macvlan_filter *mv_f,
6446                            int num,
6447                            uint16_t vlan)
6448 {
6449         int i = 0;
6450         struct i40e_mac_filter *f;
6451
6452         if (num < vsi->mac_num)
6453                 return I40E_ERR_PARAM;
6454
6455         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6456                 if (i > num - 1) {
6457                         PMD_DRV_LOG(ERR, "buffer number does not match");
6458                         return I40E_ERR_PARAM;
6459                 }
6460                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6461                                 ETH_ADDR_LEN);
6462                 mv_f[i].vlan_id = vlan;
6463                 mv_f[i].filter_type = f->mac_info.filter_type;
6464                 i++;
6465         }
6466
6467         return I40E_SUCCESS;
6468 }
6469
6470 static int
6471 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
6472 {
6473         int i, j, num;
6474         struct i40e_mac_filter *f;
6475         struct i40e_macvlan_filter *mv_f;
6476         int ret = I40E_SUCCESS;
6477
6478         if (vsi == NULL || vsi->mac_num == 0)
6479                 return I40E_ERR_PARAM;
6480
6481         /* Case that no vlan is set */
6482         if (vsi->vlan_num == 0)
6483                 num = vsi->mac_num;
6484         else
6485                 num = vsi->mac_num * vsi->vlan_num;
6486
6487         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
6488         if (mv_f == NULL) {
6489                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6490                 return I40E_ERR_NO_MEMORY;
6491         }
6492
6493         i = 0;
6494         if (vsi->vlan_num == 0) {
6495                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6496                         rte_memcpy(&mv_f[i].macaddr,
6497                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
6498                         mv_f[i].filter_type = f->mac_info.filter_type;
6499                         mv_f[i].vlan_id = 0;
6500                         i++;
6501                 }
6502         } else {
6503                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6504                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
6505                                         vsi->vlan_num, &f->mac_info.mac_addr);
6506                         if (ret != I40E_SUCCESS)
6507                                 goto DONE;
6508                         for (j = i; j < i + vsi->vlan_num; j++)
6509                                 mv_f[j].filter_type = f->mac_info.filter_type;
6510                         i += vsi->vlan_num;
6511                 }
6512         }
6513
6514         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
6515 DONE:
6516         rte_free(mv_f);
6517
6518         return ret;
6519 }
6520
6521 int
6522 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6523 {
6524         struct i40e_macvlan_filter *mv_f;
6525         int mac_num;
6526         int ret = I40E_SUCCESS;
6527
6528         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
6529                 return I40E_ERR_PARAM;
6530
6531         /* If it's already set, just return */
6532         if (i40e_find_vlan_filter(vsi, vlan))
6533                 return I40E_SUCCESS;
6534
6535         mac_num = vsi->mac_num;
6536
6537         if (mac_num == 0) {
6538                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6539                 return I40E_ERR_PARAM;
6540         }
6541
6542         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6543
6544         if (mv_f == NULL) {
6545                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6546                 return I40E_ERR_NO_MEMORY;
6547         }
6548
6549         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6550
6551         if (ret != I40E_SUCCESS)
6552                 goto DONE;
6553
6554         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6555
6556         if (ret != I40E_SUCCESS)
6557                 goto DONE;
6558
6559         i40e_set_vlan_filter(vsi, vlan, 1);
6560
6561         vsi->vlan_num++;
6562         ret = I40E_SUCCESS;
6563 DONE:
6564         rte_free(mv_f);
6565         return ret;
6566 }
6567
6568 int
6569 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6570 {
6571         struct i40e_macvlan_filter *mv_f;
6572         int mac_num;
6573         int ret = I40E_SUCCESS;
6574
6575         /**
6576          * Vlan 0 is the generic filter for untagged packets
6577          * and can't be removed.
6578          */
6579         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
6580                 return I40E_ERR_PARAM;
6581
6582         /* If it can't be found, just return */
6583         if (!i40e_find_vlan_filter(vsi, vlan))
6584                 return I40E_ERR_PARAM;
6585
6586         mac_num = vsi->mac_num;
6587
6588         if (mac_num == 0) {
6589                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6590                 return I40E_ERR_PARAM;
6591         }
6592
6593         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6594
6595         if (mv_f == NULL) {
6596                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6597                 return I40E_ERR_NO_MEMORY;
6598         }
6599
6600         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6601
6602         if (ret != I40E_SUCCESS)
6603                 goto DONE;
6604
6605         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6606
6607         if (ret != I40E_SUCCESS)
6608                 goto DONE;
6609
6610         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
6611         if (vsi->vlan_num == 1) {
6612                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6613                 if (ret != I40E_SUCCESS)
6614                         goto DONE;
6615
6616                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6617                 if (ret != I40E_SUCCESS)
6618                         goto DONE;
6619         }
6620
6621         i40e_set_vlan_filter(vsi, vlan, 0);
6622
6623         vsi->vlan_num--;
6624         ret = I40E_SUCCESS;
6625 DONE:
6626         rte_free(mv_f);
6627         return ret;
6628 }
6629
6630 int
6631 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6632 {
6633         struct i40e_mac_filter *f;
6634         struct i40e_macvlan_filter *mv_f;
6635         int i, vlan_num = 0;
6636         int ret = I40E_SUCCESS;
6637
6638         /* If the requested MAC filter already exists, just return */
6639         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6640         if (f != NULL)
6641                 return I40E_SUCCESS;
6642         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6643                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6644
6645                 /**
6646                  * If vlan_num is 0, this is the first time a MAC is
6647                  * added; set the mask for vlan_id 0.
6648                  */
6649                 if (vsi->vlan_num == 0) {
6650                         i40e_set_vlan_filter(vsi, 0, 1);
6651                         vsi->vlan_num = 1;
6652                 }
6653                 vlan_num = vsi->vlan_num;
6654         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6655                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6656                 vlan_num = 1;
6657
6658         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6659         if (mv_f == NULL) {
6660                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6661                 return I40E_ERR_NO_MEMORY;
6662         }
6663
6664         for (i = 0; i < vlan_num; i++) {
6665                 mv_f[i].filter_type = mac_filter->filter_type;
6666                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6667                                 ETH_ADDR_LEN);
6668         }
6669
6670         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6671                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6672                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6673                                         &mac_filter->mac_addr);
6674                 if (ret != I40E_SUCCESS)
6675                         goto DONE;
6676         }
6677
6678         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6679         if (ret != I40E_SUCCESS)
6680                 goto DONE;
6681
6682         /* Add the mac addr into the mac list */
6683         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6684         if (f == NULL) {
6685                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6686                 ret = I40E_ERR_NO_MEMORY;
6687                 goto DONE;
6688         }
6689         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6690                         ETH_ADDR_LEN);
6691         f->mac_info.filter_type = mac_filter->filter_type;
6692         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
6693         vsi->mac_num++;
6694
6695         ret = I40E_SUCCESS;
6696 DONE:
6697         rte_free(mv_f);
6698
6699         return ret;
6700 }
6701
6702 int
6703 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
6704 {
6705         struct i40e_mac_filter *f;
6706         struct i40e_macvlan_filter *mv_f;
6707         int i, vlan_num;
6708         enum rte_mac_filter_type filter_type;
6709         int ret = I40E_SUCCESS;
6710
6711         /* Can't find it, return an error */
6712         f = i40e_find_mac_filter(vsi, addr);
6713         if (f == NULL)
6714                 return I40E_ERR_PARAM;
6715
6716         vlan_num = vsi->vlan_num;
6717         filter_type = f->mac_info.filter_type;
6718         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6719                 filter_type == RTE_MACVLAN_HASH_MATCH) {
6720                 if (vlan_num == 0) {
6721                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
6722                         return I40E_ERR_PARAM;
6723                 }
6724         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
6725                         filter_type == RTE_MAC_HASH_MATCH)
6726                 vlan_num = 1;
6727
6728         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6729         if (mv_f == NULL) {
6730                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6731                 return I40E_ERR_NO_MEMORY;
6732         }
6733
6734         for (i = 0; i < vlan_num; i++) {
6735                 mv_f[i].filter_type = filter_type;
6736                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6737                                 ETH_ADDR_LEN);
6738         }
6739         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6740                         filter_type == RTE_MACVLAN_HASH_MATCH) {
6741                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
6742                 if (ret != I40E_SUCCESS)
6743                         goto DONE;
6744         }
6745
6746         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
6747         if (ret != I40E_SUCCESS)
6748                 goto DONE;
6749
6750         /* Remove the mac addr from the mac list */
6751         TAILQ_REMOVE(&vsi->mac_list, f, next);
6752         rte_free(f);
6753         vsi->mac_num--;
6754
6755         ret = I40E_SUCCESS;
6756 DONE:
6757         rte_free(mv_f);
6758         return ret;
6759 }
6760
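     /*
      * Editorial note (sketch of the filter model above): for the
      * RTE_MACVLAN_* filter types the driver programs one HW entry per
      * (MAC, VLAN) pair, so a VSI with M MACs and V VLANs consumes M * V
      * entries; for the RTE_MAC_* types vlan_num is forced to 1, so each
      * MAC uses a single entry regardless of the VLAN list.
      */
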
6761 /* Configure hash enable flags for RSS */
6762 uint64_t
6763 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
6764 {
6765         uint64_t hena = 0;
6766         int i;
6767
6768         if (!flags)
6769                 return hena;
6770
6771         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
6772                 if (flags & (1ULL << i))
6773                         hena |= adapter->pctypes_tbl[i];
6774         }
6775
6776         return hena;
6777 }
6778
6779 /* Parse the hash enable flags */
6780 uint64_t
6781 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
6782 {
6783         uint64_t rss_hf = 0;
6784         int i;
6785
6786         if (!flags)
6787                 return rss_hf;
6788
6789         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
6790                 if (flags & adapter->pctypes_tbl[i])
6791                         rss_hf |= (1ULL << i);
6792         }
6793         return rss_hf;
6794 }
6795
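     /*
      * Illustrative sketch (editorial, not driver code): assuming the
      * adapter->pctypes_tbl[] entries select disjoint PCTYPE bits,
      * i40e_config_hena() and i40e_parse_hena() act as inverses over the
      * flow types the adapter supports:
      *
      *   uint64_t hena = i40e_config_hena(adapter, rss_hf);
      *   uint64_t back = i40e_parse_hena(adapter, hena);
      *   // back == (rss_hf & adapter->flow_types_mask)
      */
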
6796 /* Disable RSS */
6797 static void
6798 i40e_pf_disable_rss(struct i40e_pf *pf)
6799 {
6800         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6801
6802         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
6803         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
6804         I40E_WRITE_FLUSH(hw);
6805 }
6806
6807 static int
6808 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
6809 {
6810         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6811         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6812         int ret = 0;
6813
6814         if (!key || key_len == 0) {
6815                 PMD_DRV_LOG(DEBUG, "No key to be configured");
6816                 return 0;
6817         } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
6818                 sizeof(uint32_t)) {
6819                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
6820                 return -EINVAL;
6821         }
6822
6823         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6824                 struct i40e_aqc_get_set_rss_key_data *key_dw =
6825                         (struct i40e_aqc_get_set_rss_key_data *)key;
6826
6827                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
6828                 if (ret)
6829                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
6830         } else {
6831                 uint32_t *hash_key = (uint32_t *)key;
6832                 uint16_t i;
6833
6834                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6835                         i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]);
6836                 I40E_WRITE_FLUSH(hw);
6837         }
6838
6839         return ret;
6840 }
6841
6842 static int
6843 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
6844 {
6845         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6846         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6847         int ret;
6848
6849         if (!key || !key_len)
6850                 return -EINVAL;
6851
6852         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6853                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
6854                         (struct i40e_aqc_get_set_rss_key_data *)key);
6855                 if (ret) {
6856                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
6857                         return ret;
6858                 }
6859         } else {
6860                 uint32_t *key_dw = (uint32_t *)key;
6861                 uint16_t i;
6862
6863                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6864                         key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
6865         }
6866         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
6867
6868         return 0;
6869 }
6870
6871 static int
6872 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
6873 {
6874         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6875         uint64_t hena;
6876         int ret;
6877
6878         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
6879                                rss_conf->rss_key_len);
6880         if (ret)
6881                 return ret;
6882
6883         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
6884         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
6885         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
6886         I40E_WRITE_FLUSH(hw);
6887
6888         return 0;
6889 }
6890
6891 static int
6892 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
6893                          struct rte_eth_rss_conf *rss_conf)
6894 {
6895         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6896         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6897         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
6898         uint64_t hena;
6899
6900         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6901         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6902
6903         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
6904                 if (rss_hf != 0) /* Enable RSS */
6905                         return -EINVAL;
6906                 return 0; /* Nothing to do */
6907         }
6908         /* RSS enabled */
6909         if (rss_hf == 0) /* Disable RSS */
6910                 return -EINVAL;
6911
6912         return i40e_hw_rss_hash_set(pf, rss_conf);
6913 }
6914
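     /*
      * Illustrative application-side usage (sketch, hypothetical values):
      * updating the RSS hash configuration on a configured port, e.g.
      * hashing on non-fragmented IPv4 TCP/UDP only:
      *
      *   struct rte_eth_rss_conf conf = {
      *           .rss_key = NULL,   // keep the current hash key
      *           .rss_hf = ETH_RSS_NONFRAG_IPV4_TCP |
      *                     ETH_RSS_NONFRAG_IPV4_UDP,
      *   };
      *   rte_eth_dev_rss_hash_update(port_id, &conf);
      *
      * Note that i40e_dev_rss_hash_update() only reconfigures RSS that is
      * already enabled: it rejects both enabling (rss_hf != 0 while HENA
      * is clear) and disabling (rss_hf == 0) RSS through this path.
      */
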
6915 static int
6916 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
6917                            struct rte_eth_rss_conf *rss_conf)
6918 {
6919         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6920         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6921         uint64_t hena;
6922
6923         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
6924                          &rss_conf->rss_key_len);
6925
6926         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6927         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6928         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
6929
6930         return 0;
6931 }
6932
6933 static int
6934 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
6935 {
6936         switch (filter_type) {
6937         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
6938                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
6939                 break;
6940         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
6941                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
6942                 break;
6943         case RTE_TUNNEL_FILTER_IMAC_TENID:
6944                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
6945                 break;
6946         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
6947                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
6948                 break;
6949         case ETH_TUNNEL_FILTER_IMAC:
6950                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
6951                 break;
6952         case ETH_TUNNEL_FILTER_OIP:
6953                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
6954                 break;
6955         case ETH_TUNNEL_FILTER_IIP:
6956                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
6957                 break;
6958         default:
6959                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
6960                 return -EINVAL;
6961         }
6962
6963         return 0;
6964 }
6965
6966 /* Convert tunnel filter structure */
6967 static int
6968 i40e_tunnel_filter_convert(
6969         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
6970         struct i40e_tunnel_filter *tunnel_filter)
6971 {
6972         ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
6973                         (struct ether_addr *)&tunnel_filter->input.outer_mac);
6974         ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
6975                         (struct ether_addr *)&tunnel_filter->input.inner_mac);
6976         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
6977         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
6978              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
6979             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
6980                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
6981         else
6982                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
6983         tunnel_filter->input.flags = cld_filter->element.flags;
6984         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
6985         tunnel_filter->queue = cld_filter->element.queue_number;
6986         rte_memcpy(tunnel_filter->input.general_fields,
6987                    cld_filter->general_fields,
6988                    sizeof(cld_filter->general_fields));
6989
6990         return 0;
6991 }
6992
6993 /* Check if the tunnel filter exists */
6994 struct i40e_tunnel_filter *
6995 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
6996                              const struct i40e_tunnel_filter_input *input)
6997 {
6998         int ret;
6999
7000         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7001         if (ret < 0)
7002                 return NULL;
7003
7004         return tunnel_rule->hash_map[ret];
7005 }
7006
7007 /* Add a tunnel filter into the SW list */
7008 static int
7009 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7010                              struct i40e_tunnel_filter *tunnel_filter)
7011 {
7012         struct i40e_tunnel_rule *rule = &pf->tunnel;
7013         int ret;
7014
7015         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7016         if (ret < 0) {
7017                 PMD_DRV_LOG(ERR,
7018                             "Failed to insert tunnel filter into hash table %d!",
7019                             ret);
7020                 return ret;
7021         }
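             /* On success rte_hash_add_key() returns the slot index of the
              * key; it is reused as the index into the parallel hash_map[]
              * array of filter pointers.
              */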
7022         rule->hash_map[ret] = tunnel_filter;
7023
7024         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7025
7026         return 0;
7027 }
7028
7029 /* Delete a tunnel filter from the SW list */
7030 int
7031 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7032                           struct i40e_tunnel_filter_input *input)
7033 {
7034         struct i40e_tunnel_rule *rule = &pf->tunnel;
7035         struct i40e_tunnel_filter *tunnel_filter;
7036         int ret;
7037
7038         ret = rte_hash_del_key(rule->hash_table, input);
7039         if (ret < 0) {
7040                 PMD_DRV_LOG(ERR,
7041                             "Failed to delete tunnel filter from hash table %d!",
7042                             ret);
7043                 return ret;
7044         }
7045         tunnel_filter = rule->hash_map[ret];
7046         rule->hash_map[ret] = NULL;
7047
7048         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7049         rte_free(tunnel_filter);
7050
7051         return 0;
7052 }
7053
7054 int
7055 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7056                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
7057                         uint8_t add)
7058 {
7059         uint16_t ip_type;
7060         uint32_t ipv4_addr, ipv4_addr_le;
7061         uint8_t i, tun_type = 0;
7062         /* internal variable to convert ipv6 byte order */
7063         uint32_t convert_ipv6[4];
7064         int val, ret = 0;
7065         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7066         struct i40e_vsi *vsi = pf->main_vsi;
7067         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7068         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7069         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7070         struct i40e_tunnel_filter *tunnel, *node;
7071         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7072
7073         cld_filter = rte_zmalloc("tunnel_filter",
7074                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7075                          0);
7076
7077         if (cld_filter == NULL) {
7078                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7079                 return -ENOMEM;
7080         }
7081         pfilter = cld_filter;
7082
7083         ether_addr_copy(&tunnel_filter->outer_mac,
7084                         (struct ether_addr *)&pfilter->element.outer_mac);
7085         ether_addr_copy(&tunnel_filter->inner_mac,
7086                         (struct ether_addr *)&pfilter->element.inner_mac);
7087
7088         pfilter->element.inner_vlan =
7089                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7090         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
7091                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7092                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7093                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7094                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7095                                 &ipv4_addr_le,
7096                                 sizeof(pfilter->element.ipaddr.v4.data));
7097         } else {
7098                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7099                 for (i = 0; i < 4; i++) {
7100                         convert_ipv6[i] =
7101                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
7102                 }
7103                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7104                            &convert_ipv6,
7105                            sizeof(pfilter->element.ipaddr.v6.data));
7106         }
7107
7108         /* check the tunnel type */
7109         switch (tunnel_filter->tunnel_type) {
7110         case RTE_TUNNEL_TYPE_VXLAN:
7111                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7112                 break;
7113         case RTE_TUNNEL_TYPE_NVGRE:
7114                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7115                 break;
7116         case RTE_TUNNEL_TYPE_IP_IN_GRE:
7117                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7118                 break;
7119         default:
7120                 /* Other tunnel types are not supported. */
7121                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7122                 rte_free(cld_filter);
7123                 return -EINVAL;
7124         }
7125
7126         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7127                                        &pfilter->element.flags);
7128         if (val < 0) {
7129                 rte_free(cld_filter);
7130                 return -EINVAL;
7131         }
7132
7133         pfilter->element.flags |= rte_cpu_to_le_16(
7134                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7135                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7136         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7137         pfilter->element.queue_number =
7138                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7139
7140         /* Check if the filter already exists in the SW list */
7141         memset(&check_filter, 0, sizeof(check_filter));
7142         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7143         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7144         if (add && node) {
7145                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
                     rte_free(cld_filter);
7146                 return -EINVAL;
7147         }
7148
7149         if (!add && !node) {
7150                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
                     rte_free(cld_filter);
7151                 return -EINVAL;
7152         }
7153
7154         if (add) {
7155                 ret = i40e_aq_add_cloud_filters(hw,
7156                                         vsi->seid, &cld_filter->element, 1);
7157                 if (ret < 0) {
7158                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
                             rte_free(cld_filter);
7159                         return -ENOTSUP;
7160                 }
7161                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
                     if (tunnel == NULL) {
                             PMD_DRV_LOG(ERR, "Failed to alloc memory.");
                             rte_free(cld_filter);
                             return -ENOMEM;
                     }
7162                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7163                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7164         } else {
7165                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7166                                                    &cld_filter->element, 1);
7167                 if (ret < 0) {
7168                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
                             rte_free(cld_filter);
7169                         return -ENOTSUP;
7170                 }
7171                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7172         }
7173
7174         rte_free(cld_filter);
7175         return ret;
7176 }
7177
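     /*
      * Illustrative application-side usage (sketch, hypothetical values):
      * a VXLAN tunnel filter reaches i40e_dev_tunnel_filter_set() through
      * the generic filter-control API, e.g.:
      *
      *   struct rte_eth_tunnel_filter_conf conf = {
      *           .inner_mac.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
      *           .filter_type = ETH_TUNNEL_FILTER_IMAC,
      *           .tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
      *           .tenant_id = 100,  // VNI to match
      *           .queue_id = 0,     // RX queue receiving matched packets
      *   };
      *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
      *                           RTE_ETH_FILTER_ADD, &conf);
      */
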
7178 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7179 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7180 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7181 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7182 #define I40E_TR_GRE_KEY_MASK                    0x400
7183 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7184 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7185
7186 static enum i40e_status_code
7187 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7188 {
7189         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7190         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7191         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7192         enum i40e_status_code status = I40E_SUCCESS;
7193
7194         memset(&filter_replace, 0,
7195                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7196         memset(&filter_replace_buf, 0,
7197                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7198
7199         /* create L1 filter */
7200         filter_replace.old_filter_type =
7201                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7202         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7203         filter_replace.tr_bit = 0;
7204
7205         /* Prepare the buffer, 3 entries */
7206         filter_replace_buf.data[0] =
7207                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7208         filter_replace_buf.data[0] |=
7209                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7210         filter_replace_buf.data[2] = 0xFF;
7211         filter_replace_buf.data[3] = 0xFF;
7212         filter_replace_buf.data[4] =
7213                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7214         filter_replace_buf.data[4] |=
7215                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7216         filter_replace_buf.data[7] = 0xF0;
7217         filter_replace_buf.data[8]
7218                 = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7219         filter_replace_buf.data[8] |=
7220                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7221         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7222                 I40E_TR_GENEVE_KEY_MASK |
7223                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7224         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7225                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7226                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7227
7228         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7229                                                &filter_replace_buf);
7230         return status;
7231 }
7232
7233 static enum i40e_status_code
7234 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7235 {
7236         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7237         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7238         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7239         enum i40e_status_code status = I40E_SUCCESS;
7240
7241         /* For MPLSoUDP */
7242         memset(&filter_replace, 0,
7243                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7244         memset(&filter_replace_buf, 0,
7245                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7246         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7247                 I40E_AQC_MIRROR_CLOUD_FILTER;
7248         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7249         filter_replace.new_filter_type =
7250                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7251         /* Prepare the buffer, 2 entries */
7252         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7253         filter_replace_buf.data[0] |=
7254                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7255         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7256         filter_replace_buf.data[4] |=
7257                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7258         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7259                                                &filter_replace_buf);
7260         if (status < 0)
7261                 return status;
7262
7263         /* For MPLSoGRE */
7264         memset(&filter_replace, 0,
7265                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7266         memset(&filter_replace_buf, 0,
7267                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7268
7269         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7270                 I40E_AQC_MIRROR_CLOUD_FILTER;
7271         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7272         filter_replace.new_filter_type =
7273                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7274         /* Prepare the buffer, 2 entries */
7275         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7276         filter_replace_buf.data[0] |=
7277                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7278         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7279         filter_replace_buf.data[4] |=
7280                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7281
7282         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7283                                                &filter_replace_buf);
7284         return status;
7285 }
7286
7287 static enum i40e_status_code
7288 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7289 {
7290         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7291         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7292         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7293         enum i40e_status_code status = I40E_SUCCESS;
7294
7295         /* For GTP-C */
7296         memset(&filter_replace, 0,
7297                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7298         memset(&filter_replace_buf, 0,
7299                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7300         /* create L1 filter */
7301         filter_replace.old_filter_type =
7302                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7303         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
7304         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
7305                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7306         /* Prepare the buffer, 2 entries */
7307         filter_replace_buf.data[0] =
7308                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7309         filter_replace_buf.data[0] |=
7310                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7311         filter_replace_buf.data[2] = 0xFF;
7312         filter_replace_buf.data[3] = 0xFF;
7313         filter_replace_buf.data[4] =
7314                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7315         filter_replace_buf.data[4] |=
7316                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7317         filter_replace_buf.data[6] = 0xFF;
7318         filter_replace_buf.data[7] = 0xFF;
7319         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7320                                                &filter_replace_buf);
7321         if (status < 0)
7322                 return status;
7323
7324         /* for GTP-U */
7325         memset(&filter_replace, 0,
7326                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7327         memset(&filter_replace_buf, 0,
7328                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7329         /* create L1 filter */
7330         filter_replace.old_filter_type =
7331                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
7332         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
7333         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
7334                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7335         /* Prepare the buffer, 2 entries */
7336         filter_replace_buf.data[0] =
7337                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7338         filter_replace_buf.data[0] |=
7339                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7340         filter_replace_buf.data[2] = 0xFF;
7341         filter_replace_buf.data[3] = 0xFF;
7342         filter_replace_buf.data[4] =
7343                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7344         filter_replace_buf.data[4] |=
7345                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7346         filter_replace_buf.data[6] = 0xFF;
7347         filter_replace_buf.data[7] = 0xFF;
7348
7349         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7350                                                &filter_replace_buf);
7351         return status;
7352 }
7353
7354 static enum i40e_status_code
7355 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
7356 {
7357         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7358         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7359         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7360         enum i40e_status_code status = I40E_SUCCESS;
7361
7362         /* for GTP-C */
7363         memset(&filter_replace, 0,
7364                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7365         memset(&filter_replace_buf, 0,
7366                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7367         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7368         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7369         filter_replace.new_filter_type =
7370                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7371         /* Prepare the buffer, 2 entries */
7372         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
7373         filter_replace_buf.data[0] |=
7374                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7375         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7376         filter_replace_buf.data[4] |=
7377                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7378         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7379                                                &filter_replace_buf);
7380         if (status < 0)
7381                 return status;
7382
7383         /* for GTP-U */
7384         memset(&filter_replace, 0,
7385                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7386         memset(&filter_replace_buf, 0,
7387                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7388         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7389         filter_replace.old_filter_type =
7390                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7391         filter_replace.new_filter_type =
7392                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7393         /* Prepare the buffer, 2 entries */
7394         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
7395         filter_replace_buf.data[0] |=
7396                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7397         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7398         filter_replace_buf.data[4] |=
7399                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7400
7401         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7402                                                &filter_replace_buf);
7403         return status;
7404 }
7405
7406 int
7407 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
7408                       struct i40e_tunnel_filter_conf *tunnel_filter,
7409                       uint8_t add)
7410 {
7411         uint16_t ip_type;
7412         uint32_t ipv4_addr, ipv4_addr_le;
7413         uint8_t i, tun_type = 0;
7414         /* internal variable to convert ipv6 byte order */
7415         uint32_t convert_ipv6[4];
7416         int val, ret = 0;
7417         struct i40e_pf_vf *vf = NULL;
7418         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7419         struct i40e_vsi *vsi;
7420         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7421         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7422         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7423         struct i40e_tunnel_filter *tunnel, *node;
7424         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7425         uint32_t teid_le;
7426         bool big_buffer = 0;
7427
7428         cld_filter = rte_zmalloc("tunnel_filter",
7429                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7430                          0);
7431
7432         if (cld_filter == NULL) {
7433                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7434                 return -ENOMEM;
7435         }
7436         pfilter = cld_filter;
7437
7438         ether_addr_copy(&tunnel_filter->outer_mac,
7439                         (struct ether_addr *)&pfilter->element.outer_mac);
7440         ether_addr_copy(&tunnel_filter->inner_mac,
7441                         (struct ether_addr *)&pfilter->element.inner_mac);
7442
7443         pfilter->element.inner_vlan =
7444                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7445         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
7446                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7447                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7448                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7449                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7450                                 &ipv4_addr_le,
7451                                 sizeof(pfilter->element.ipaddr.v4.data));
7452         } else {
7453                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7454                 for (i = 0; i < 4; i++) {
7455                         convert_ipv6[i] =
7456                         rte_cpu_to_le_32(rte_be_to_cpu_32(
7457                                          tunnel_filter->ip_addr.ipv6_addr[i]));
7458                 }
7459                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7460                            &convert_ipv6,
7461                            sizeof(pfilter->element.ipaddr.v6.data));
7462         }
7463
7464         /* check the tunnel type */
7465         switch (tunnel_filter->tunnel_type) {
7466         case I40E_TUNNEL_TYPE_VXLAN:
7467                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7468                 break;
7469         case I40E_TUNNEL_TYPE_NVGRE:
7470                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7471                 break;
7472         case I40E_TUNNEL_TYPE_IP_IN_GRE:
7473                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7474                 break;
7475         case I40E_TUNNEL_TYPE_MPLSoUDP:
7476                 if (!pf->mpls_replace_flag) {
7477                         i40e_replace_mpls_l1_filter(pf);
7478                         i40e_replace_mpls_cloud_filter(pf);
7479                         pf->mpls_replace_flag = 1;
7480                 }
7481                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7482                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7483                         teid_le >> 4;
7484                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7485                         (teid_le & 0xF) << 12;
7486                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7487                         0x40;
7488                 big_buffer = 1;
7489                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
7490                 break;
7491         case I40E_TUNNEL_TYPE_MPLSoGRE:
7492                 if (!pf->mpls_replace_flag) {
7493                         i40e_replace_mpls_l1_filter(pf);
7494                         i40e_replace_mpls_cloud_filter(pf);
7495                         pf->mpls_replace_flag = 1;
7496                 }
7497                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7498                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7499                         teid_le >> 4;
7500                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7501                         (teid_le & 0xF) << 12;
7502                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7503                         0x0;
7504                 big_buffer = 1;
7505                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
7506                 break;
7507         case I40E_TUNNEL_TYPE_GTPC:
7508                 if (!pf->gtp_replace_flag) {
7509                         i40e_replace_gtp_l1_filter(pf);
7510                         i40e_replace_gtp_cloud_filter(pf);
7511                         pf->gtp_replace_flag = 1;
7512                 }
7513                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7514                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
7515                         (teid_le >> 16) & 0xFFFF;
7516                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
7517                         teid_le & 0xFFFF;
7518                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
7519                         0x0;
7520                 big_buffer = 1;
7521                 break;
7522         case I40E_TUNNEL_TYPE_GTPU:
7523                 if (!pf->gtp_replace_flag) {
7524                         i40e_replace_gtp_l1_filter(pf);
7525                         i40e_replace_gtp_cloud_filter(pf);
7526                         pf->gtp_replace_flag = 1;
7527                 }
7528                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7529                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
7530                         (teid_le >> 16) & 0xFFFF;
7531                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
7532                         teid_le & 0xFFFF;
7533                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
7534                         0x0;
7535                 big_buffer = 1;
7536                 break;
7537         case I40E_TUNNEL_TYPE_QINQ:
7538                 if (!pf->qinq_replace_flag) {
7539                         ret = i40e_cloud_filter_qinq_create(pf);
7540                         if (ret < 0)
7541                                 PMD_DRV_LOG(DEBUG,
7542                                             "QinQ tunnel filter already created.");
7543                         pf->qinq_replace_flag = 1;
7544                 }
7545                 /* Add the values of the Outer and Inner VLAN
7546                  * into the General fields.
7547                  * Big Buffer must be used; see the changes in
7548                  * i40e_aq_add_cloud_filters.
7549                  */
7550                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
7551                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
7552                 big_buffer = 1;
7553                 break;
7554         default:
7555                 /* Other tunnel types are not supported. */
7556                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7557                 rte_free(cld_filter);
7558                 return -EINVAL;
7559         }
7560
7561         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
7562                 pfilter->element.flags =
7563                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7564         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
7565                 pfilter->element.flags =
7566                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7567         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
7568                 pfilter->element.flags =
7569                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7570         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
7571                 pfilter->element.flags =
7572                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7573         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
7574                 pfilter->element.flags |=
7575                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
7576         else {
7577                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7578                                                 &pfilter->element.flags);
7579                 if (val < 0) {
7580                         rte_free(cld_filter);
7581                         return -EINVAL;
7582                 }
7583         }
7584
7585         pfilter->element.flags |= rte_cpu_to_le_16(
7586                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7587                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7588         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7589         pfilter->element.queue_number =
7590                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7591
7592         if (!tunnel_filter->is_to_vf)
7593                 vsi = pf->main_vsi;
7594         else {
7595                 if (tunnel_filter->vf_id >= pf->vf_num) {
7596                         PMD_DRV_LOG(ERR, "Invalid argument.");
                             rte_free(cld_filter);
7597                         return -EINVAL;
7598                 }
7599                 vf = &pf->vfs[tunnel_filter->vf_id];
7600                 vsi = vf->vsi;
7601         }
7602
7603         /* Check if the filter already exists in the SW list */
7604         memset(&check_filter, 0, sizeof(check_filter));
7605         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7606         check_filter.is_to_vf = tunnel_filter->is_to_vf;
7607         check_filter.vf_id = tunnel_filter->vf_id;
7608         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7609         if (add && node) {
7610                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
                     rte_free(cld_filter);
7611                 return -EINVAL;
7612         }
7613
7614         if (!add && !node) {
7615                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
                     rte_free(cld_filter);
7616                 return -EINVAL;
7617         }
7618
7619         if (add) {
7620                 if (big_buffer)
7621                         ret = i40e_aq_add_cloud_filters_big_buffer(hw,
7622                                                    vsi->seid, cld_filter, 1);
7623                 else
7624                         ret = i40e_aq_add_cloud_filters(hw,
7625                                         vsi->seid, &cld_filter->element, 1);
7626                 if (ret < 0) {
7627                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
                             rte_free(cld_filter);
7628                         return -ENOTSUP;
7629                 }
7630                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
                     if (tunnel == NULL) {
                             PMD_DRV_LOG(ERR, "Failed to alloc memory.");
                             rte_free(cld_filter);
                             return -ENOMEM;
                     }
7631                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7632                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7633         } else {
7634                 if (big_buffer)
7635                         ret = i40e_aq_remove_cloud_filters_big_buffer(
7636                                 hw, vsi->seid, cld_filter, 1);
7637                 else
7638                         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7639                                                    &cld_filter->element, 1);
7640                 if (ret < 0) {
7641                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
                             rte_free(cld_filter);
7642                         return -ENOTSUP;
7643                 }
7644                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7645         }
7646
7647         rte_free(cld_filter);
7648         return ret;
7649 }
7650
7651 static int
7652 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
7653 {
7654         uint8_t i;
7655
7656         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7657                 if (pf->vxlan_ports[i] == port)
7658                         return i;
7659         }
7660
7661         return -1;
7662 }
7663
7664 static int
7665 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
7666 {
7667         int  idx, ret;
7668         uint8_t filter_idx;
7669         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7670
7671         idx = i40e_get_vxlan_port_idx(pf, port);
7672
7673         /* Check if port already exists */
7674         if (idx >= 0) {
7675                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
7676                 return -EINVAL;
7677         }
7678
7679         /* Now check if there is space to add the new port */
7680         idx = i40e_get_vxlan_port_idx(pf, 0);
7681         if (idx < 0) {
7682                 PMD_DRV_LOG(ERR,
7683                         "Maximum number of UDP ports reached, not adding port %d",
7684                         port);
7685                 return -ENOSPC;
7686         }
7687
7688         ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
7689                                         &filter_idx, NULL);
7690         if (ret < 0) {
7691                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
7692                 return -1;
7693         }
7694
7695         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
7696                          port, filter_idx);
7697
7698         /* New port: add it and mark its index in the bitmap */
7699         pf->vxlan_ports[idx] = port;
7700         pf->vxlan_bitmap |= (1 << idx);
7701
7702         if (!(pf->flags & I40E_FLAG_VXLAN))
7703                 pf->flags |= I40E_FLAG_VXLAN;
7704
7705         return 0;
7706 }
7707
7708 static int
7709 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
7710 {
7711         int idx;
7712         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7713
7714         if (!(pf->flags & I40E_FLAG_VXLAN)) {
7715                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
7716                 return -EINVAL;
7717         }
7718
7719         idx = i40e_get_vxlan_port_idx(pf, port);
7720
7721         if (idx < 0) {
7722                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
7723                 return -EINVAL;
7724         }
7725
7726         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
7727                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
7728                 return -1;
7729         }
7730
7731         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
7732                         port, idx);
7733
7734         pf->vxlan_ports[idx] = 0;
7735         pf->vxlan_bitmap &= ~(1 << idx);
7736
7737         if (!pf->vxlan_bitmap)
7738                 pf->flags &= ~I40E_FLAG_VXLAN;
7739
7740         return 0;
7741 }
7742
7743 /* Add UDP tunneling port */
7744 static int
7745 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
7746                              struct rte_eth_udp_tunnel *udp_tunnel)
7747 {
7748         int ret = 0;
7749         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7750
7751         if (udp_tunnel == NULL)
7752                 return -EINVAL;
7753
7754         switch (udp_tunnel->prot_type) {
7755         case RTE_TUNNEL_TYPE_VXLAN:
7756                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
7757                 break;
7758
7759         case RTE_TUNNEL_TYPE_GENEVE:
7760         case RTE_TUNNEL_TYPE_TEREDO:
7761                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
7762                 ret = -1;
7763                 break;
7764
7765         default:
7766                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7767                 ret = -1;
7768                 break;
7769         }
7770
7771         return ret;
7772 }
7773
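     /*
      * Illustrative application-side usage (sketch): offloading the IANA
      * default VXLAN UDP port so the device recognizes VXLAN frames:
      *
      *   struct rte_eth_udp_tunnel tunnel = {
      *           .udp_port = 4789,
      *           .prot_type = RTE_TUNNEL_TYPE_VXLAN,
      *   };
      *   rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
      */
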
7774 /* Remove UDP tunneling port */
7775 static int
7776 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
7777                              struct rte_eth_udp_tunnel *udp_tunnel)
7778 {
7779         int ret = 0;
7780         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7781
7782         if (udp_tunnel == NULL)
7783                 return -EINVAL;
7784
7785         switch (udp_tunnel->prot_type) {
7786         case RTE_TUNNEL_TYPE_VXLAN:
7787                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
7788                 break;
7789         case RTE_TUNNEL_TYPE_GENEVE:
7790         case RTE_TUNNEL_TYPE_TEREDO:
7791                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
7792                 ret = -1;
7793                 break;
7794         default:
7795                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7796                 ret = -1;
7797                 break;
7798         }
7799
7800         return ret;
7801 }
7802
7803 /* Calculate the maximum number of contiguous PF queues that are configured */
7804 static int
7805 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
7806 {
7807         struct rte_eth_dev_data *data = pf->dev_data;
7808         int i, num;
7809         struct i40e_rx_queue *rxq;
7810
7811         num = 0;
7812         for (i = 0; i < pf->lan_nb_qps; i++) {
7813                 rxq = data->rx_queues[i];
7814                 if (rxq && rxq->q_set)
7815                         num++;
7816                 else
7817                         break;
7818         }
7819
7820         return num;
7821 }
7822
7823 /* Configure RSS */
7824 static int
7825 i40e_pf_config_rss(struct i40e_pf *pf)
7826 {
7827         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7828         struct rte_eth_rss_conf rss_conf;
7829         uint32_t i, lut = 0;
7830         uint16_t j, num;
7831
7832         /*
7833          * If both VMDQ and RSS are enabled, not all PF queues are configured.
7834          * It's necessary to calculate how many PF queues are actually configured.
7835          */
7836         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
7837                 num = i40e_pf_calc_configured_queues_num(pf);
7838         else
7839                 num = pf->dev_data->nb_rx_queues;
7840
7841         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
7842         PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
7843                         num);
7844
7845         if (num == 0) {
7846                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
7847                 return -ENOTSUP;
7848         }
7849
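             /* Each 32-bit HLUT register packs four 8-bit LUT entries: a
              * queue index is shifted in per iteration and the register is
              * written once every fourth entry.
              */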
7850         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
7851                 if (j == num)
7852                         j = 0;
7853                 lut = (lut << 8) | (j & ((0x1 <<
7854                         hw->func_caps.rss_table_entry_width) - 1));
7855                 if ((i & 3) == 3)
7856                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
7857         }
7858
7859         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
7860         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
7861                 i40e_pf_disable_rss(pf);
7862                 return 0;
7863         }
7864         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
7865                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
7866                 /* Random default keys */
7867                 static uint32_t rss_key_default[] = {0x6b793944,
7868                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
7869                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
7870                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
7871
7872                 rss_conf.rss_key = (uint8_t *)rss_key_default;
7873                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7874                                                         sizeof(uint32_t);
7875         }
7876
7877         return i40e_hw_rss_hash_set(pf, &rss_conf);
7878 }
7879
7880 static int
7881 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
7882                                struct rte_eth_tunnel_filter_conf *filter)
7883 {
7884         if (pf == NULL || filter == NULL) {
7885                 PMD_DRV_LOG(ERR, "Invalid parameter");
7886                 return -EINVAL;
7887         }
7888
7889         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
7890                 PMD_DRV_LOG(ERR, "Invalid queue ID");
7891                 return -EINVAL;
7892         }
7893
7894         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
7895                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
7896                 return -EINVAL;
7897         }
7898
7899         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
7900                 (is_zero_ether_addr(&filter->outer_mac))) {
7901                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
7902                 return -EINVAL;
7903         }
7904
7905         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
7906                 (is_zero_ether_addr(&filter->inner_mac))) {
7907                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
7908                 return -EINVAL;
7909         }
7910
7911         return 0;
7912 }
7913
7914 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
7915 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
7916 static int
7917 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
7918 {
7919         uint32_t val, reg;
7920         int ret = -EINVAL;
7921
7922         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
7923         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
7924
7925         if (len == 3) {
7926                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
7927         } else if (len == 4) {
7928                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
7929         } else {
7930                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
7931                 return ret;
7932         }
7933
7934         if (reg != val) {
7935                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
7936                                                    reg, NULL);
7937                 if (ret != 0)
7938                         return ret;
7939         } else {
7940                 ret = 0;
7941         }
7942         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
7943                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
7944
7945         return ret;
7946 }
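
/*
 * Pattern sketch (illustrative): the read-modify-write sequence above in
 * its generic form; the caller writes the register back only when the
 * returned value differs from the one read.
 */
static inline uint32_t
example_toggle_mask_bit(uint32_t val, uint32_t mask, int set)
{
	return set ? (val | mask) : (val & ~mask);
}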
7947
7948 static int
7949 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
7950 {
7951         int ret = -EINVAL;
7952
7953         if (!hw || !cfg)
7954                 return -EINVAL;
7955
7956         switch (cfg->cfg_type) {
7957         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
7958                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
7959                 break;
7960         default:
7961                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
7962                 break;
7963         }
7964
7965         return ret;
7966 }
7967
7968 static int
7969 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
7970                                enum rte_filter_op filter_op,
7971                                void *arg)
7972 {
7973         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7974         int ret = I40E_ERR_PARAM;
7975
7976         switch (filter_op) {
7977         case RTE_ETH_FILTER_SET:
7978                 ret = i40e_dev_global_config_set(hw,
7979                         (struct rte_eth_global_cfg *)arg);
7980                 break;
7981         default:
7982                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
7983                 break;
7984         }
7985
7986         return ret;
7987 }
7988
7989 static int
7990 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
7991                           enum rte_filter_op filter_op,
7992                           void *arg)
7993 {
7994         struct rte_eth_tunnel_filter_conf *filter;
7995         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7996         int ret = I40E_SUCCESS;
7997
7998         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
7999
8000         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
8001                 return I40E_ERR_PARAM;
8002
8003         switch (filter_op) {
8004         case RTE_ETH_FILTER_NOP:
8005                 if (!(pf->flags & I40E_FLAG_VXLAN))
8006                         ret = I40E_NOT_SUPPORTED;
8007                 break;
8008         case RTE_ETH_FILTER_ADD:
8009                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
8010                 break;
8011         case RTE_ETH_FILTER_DELETE:
8012                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
8013                 break;
8014         default:
8015                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8016                 ret = I40E_ERR_PARAM;
8017                 break;
8018         }
8019
8020         return ret;
8021 }
8022
8023 static int
8024 i40e_pf_config_mq_rx(struct i40e_pf *pf)
8025 {
8026         int ret = 0;
8027         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8028
8029         /* RSS setup */
8030         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
8031                 ret = i40e_pf_config_rss(pf);
8032         else
8033                 i40e_pf_disable_rss(pf);
8034
8035         return ret;
8036 }
8037
8038 /* Get the symmetric hash enable configurations per port */
8039 static void
8040 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
8041 {
8042         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8043
8044         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
8045 }
8046
8047 /* Set the symmetric hash enable configurations per port */
8048 static void
8049 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8050 {
8051         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8052
8053         if (enable > 0) {
8054                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
8055                         PMD_DRV_LOG(INFO,
8056                                 "Symmetric hash has already been enabled");
8057                         return;
8058                 }
8059                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8060         } else {
8061                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
8062                         PMD_DRV_LOG(INFO,
8063                                 "Symmetric hash has already been disabled");
8064                         return;
8065                 }
8066                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8067         }
8068         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8069         I40E_WRITE_FLUSH(hw);
8070 }
8071
8072 /*
8073  * Get global configurations of hash function type and symmetric hash enable
8074  * per flow type (pctype). Note that global configuration means it affects all
8075  * the ports on the same NIC.
8076  */
8077 static int
8078 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
8079                                    struct rte_eth_hash_global_conf *g_cfg)
8080 {
8081         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8082         uint32_t reg;
8083         uint16_t i, j;
8084
8085         memset(g_cfg, 0, sizeof(*g_cfg));
8086         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8087         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
8088                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
8089         else
8090                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
8091         PMD_DRV_LOG(DEBUG, "Hash function is %s",
8092                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
8093
8094         /*
8095          * We work only with the lowest 32 bits, which is not correct. To work
8096          * properly, the valid_bit_mask size should be increased to 64 bits,
8097          * but that would break the ABI. This modification will be done in a
8098          * future release.
8099          */
8100         g_cfg->valid_bit_mask[0] = (uint32_t)adapter->flow_types_mask;
8101
8102         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT32_BIT; i++) {
8103                 if (!adapter->pctypes_tbl[i])
8104                         continue;
8105                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8106                      j < I40E_FILTER_PCTYPE_MAX; j++) {
8107                         if (adapter->pctypes_tbl[i] & (1ULL << j)) {
8108                                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
8109                                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8110                                         g_cfg->sym_hash_enable_mask[0] |=
8111                                                                 (1UL << i);
8112                                 }
8113                         }
8114                 }
8115         }
8116
8117         return 0;
8118 }
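
/*
 * Illustrative helper (not driver code): the expansion the nested loop
 * above performs for one flow type. A flow type is reported symmetric
 * when any of the hardware pctypes it maps to has SYMH_ENA set in
 * GLQF_HSYM.
 */
static inline int
example_flow_type_is_symmetric(struct i40e_hw *hw, uint64_t pctypes)
{
	uint16_t j;

	for (j = I40E_FILTER_PCTYPE_INVALID + 1;
	     j < I40E_FILTER_PCTYPE_MAX; j++) {
		if ((pctypes & (1ULL << j)) &&
		    (i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j)) &
		     I40E_GLQF_HSYM_SYMH_ENA_MASK))
			return 1;
	}
	return 0;
}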
8119
8120 static int
8121 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
8122                               const struct rte_eth_hash_global_conf *g_cfg)
8123 {
8124         uint32_t i;
8125         uint32_t mask0, i40e_mask = adapter->flow_types_mask;
8126
8127         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
8128                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
8129                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
8130                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
8131                                                 g_cfg->hash_func);
8132                 return -EINVAL;
8133         }
8134
8135         /*
8136          * As i40e supports fewer than 32 flow types, only the first 32 bits
8137          * need to be checked.
8138          */
8139         mask0 = g_cfg->valid_bit_mask[0];
8140         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8141                 if (i == 0) {
8142                         /* Check if any unsupported flow type configured */
8143                         if ((mask0 | i40e_mask) ^ i40e_mask)
8144                                 goto mask_err;
8145                 } else {
8146                         if (g_cfg->valid_bit_mask[i])
8147                                 goto mask_err;
8148                 }
8149         }
8150
8151         return 0;
8152
8153 mask_err:
8154         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
8155
8156         return -EINVAL;
8157 }
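
/*
 * Worked example (illustrative) of the validity test above:
 * (mask0 | valid) ^ valid is non-zero exactly when mask0 sets a bit
 * outside `valid`; it is equivalent to mask0 & ~valid.
 */
static inline int
example_has_unsupported_bits(uint32_t mask0, uint32_t valid)
{
	return ((mask0 | valid) ^ valid) != 0;
}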
8158
8159 /*
8160  * Set global configurations of hash function type and symmetric hash enable
8161  * per flow type (pctype). Note that modifying the global configuration affects
8162  * all the ports on the same NIC.
8163  */
8164 static int
8165 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
8166                                    struct rte_eth_hash_global_conf *g_cfg)
8167 {
8168         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8169         int ret;
8170         uint16_t i, j;
8171         uint32_t reg;
8172         /*
8173          * We work only with the lowest 32 bits, which is not correct. To work
8174          * properly, the valid_bit_mask size should be increased to 64 bits,
8175          * but that would break the ABI. This modification will be done in a
8176          * future release.
8177          */
8178         uint32_t mask0 = g_cfg->valid_bit_mask[0] &
8179                                         (uint32_t)adapter->flow_types_mask;
8180
8181         /* Check the input parameters */
8182         ret = i40e_hash_global_config_check(adapter, g_cfg);
8183         if (ret < 0)
8184                 return ret;
8185
8186         for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT32_BIT; i++) {
8187                 if (mask0 & (1UL << i)) {
8188                         reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
8189                                         I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
8190
8191                         for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8192                              j < I40E_FILTER_PCTYPE_MAX; j++) {
8193                                 if (adapter->pctypes_tbl[i] & (1ULL << j))
8194                                         i40e_write_rx_ctl(hw,
8195                                                           I40E_GLQF_HSYM(j),
8196                                                           reg);
8197                         }
8198                 }
8199         }
8200
8201         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8202         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
8203                 /* Toeplitz */
8204                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
8205                         PMD_DRV_LOG(DEBUG,
8206                                 "Hash function already set to Toeplitz");
8207                         goto out;
8208                 }
8209                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
8210         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
8211                 /* Simple XOR */
8212                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
8213                         PMD_DRV_LOG(DEBUG,
8214                                 "Hash function already set to Simple XOR");
8215                         goto out;
8216                 }
8217                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
8218         } else
8219                 /* Use the default, and keep it as it is */
8220                 goto out;
8221
8222         i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
8223
8224 out:
8225         I40E_WRITE_FLUSH(hw);
8226
8227         return 0;
8228 }
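
/*
 * Hypothetical caller sketch, assuming the rte_eth_ctrl.h layout of
 * struct rte_eth_hash_global_conf: enable symmetric Toeplitz hashing
 * for non-fragmented IPv4/UDP flows via the function above.
 */
static int
example_enable_sym_toeplitz_ipv4_udp(struct i40e_hw *hw)
{
	struct rte_eth_hash_global_conf g_cfg;

	memset(&g_cfg, 0, sizeof(g_cfg));
	g_cfg.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
	g_cfg.valid_bit_mask[0] = 1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
	g_cfg.sym_hash_enable_mask[0] = 1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP;

	return i40e_set_hash_filter_global_config(hw, &g_cfg);
}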
8229
8230 /**
8231  * Valid input sets for hash and flow director filters per PCTYPE
8232  */
8233 static uint64_t
8234 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
8235                 enum rte_filter_type filter)
8236 {
8237         uint64_t valid;
8238
8239         static const uint64_t valid_hash_inset_table[] = {
8240                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8241                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8242                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8243                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
8244                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
8245                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8246                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8247                         I40E_INSET_FLEX_PAYLOAD,
8248                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8249                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8250                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8251                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8252                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8253                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8254                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8255                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8256                         I40E_INSET_FLEX_PAYLOAD,
8257                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8258                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8259                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8260                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8261                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8262                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8263                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8264                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8265                         I40E_INSET_FLEX_PAYLOAD,
8266                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8267                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8268                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8269                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8270                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8271                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8272                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8273                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8274                         I40E_INSET_FLEX_PAYLOAD,
8275                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8276                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8277                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8278                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8279                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8280                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8281                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8282                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8283                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8284                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8285                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8286                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8287                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8288                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8289                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8290                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8291                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8292                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8293                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8294                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8295                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8296                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8297                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8298                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8299                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8300                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8301                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
8302                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8303                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8304                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8305                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8306                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8307                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8308                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8309                         I40E_INSET_FLEX_PAYLOAD,
8310                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8311                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8312                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8313                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8314                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8315                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
8316                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
8317                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
8318                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8319                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8320                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8321                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8322                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8323                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8324                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8325                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
8326                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8327                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8328                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8329                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8330                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8331                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8332                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8333                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8334                         I40E_INSET_FLEX_PAYLOAD,
8335                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8336                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8337                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8338                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8339                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8340                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8341                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8342                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8343                         I40E_INSET_FLEX_PAYLOAD,
8344                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8345                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8346                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8347                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8348                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8349                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8350                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8351                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8352                         I40E_INSET_FLEX_PAYLOAD,
8353                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8354                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8355                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8356                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8357                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8358                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8359                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8360                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8361                         I40E_INSET_FLEX_PAYLOAD,
8362                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8363                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8364                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8365                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8366                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8367                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8368                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8369                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
8370                         I40E_INSET_FLEX_PAYLOAD,
8371                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8372                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8373                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8374                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8375                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8376                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8377                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
8378                         I40E_INSET_FLEX_PAYLOAD,
8379                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8380                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8381                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8382                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
8383                         I40E_INSET_FLEX_PAYLOAD,
8384         };
8385
8386         /**
8387          * Flow director supports only fields defined in
8388          * union rte_eth_fdir_flow.
8389          */
8390         static const uint64_t valid_fdir_inset_table[] = {
8391                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8392                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8393                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8394                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8395                 I40E_INSET_IPV4_TTL,
8396                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8397                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8398                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8399                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8400                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8401                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8402                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8403                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8404                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8405                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8406                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8407                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8408                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8409                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8410                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8411                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8412                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8413                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8414                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8415                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8416                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8417                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8418                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8419                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8420                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8421                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8422                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8423                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8424                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8425                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8426                 I40E_INSET_SCTP_VT,
8427                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8428                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8429                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8430                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8431                 I40E_INSET_IPV4_TTL,
8432                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8433                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8434                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8435                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8436                 I40E_INSET_IPV6_HOP_LIMIT,
8437                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8438                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8439                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8440                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8441                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8442                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8443                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8444                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8445                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8446                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8447                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8448                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8449                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8450                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8451                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8452                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8453                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8454                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8455                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8456                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8457                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8458                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8459                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8460                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8461                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8462                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8463                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8464                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8465                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8466                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8467                 I40E_INSET_SCTP_VT,
8468                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8469                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8470                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8471                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8472                 I40E_INSET_IPV6_HOP_LIMIT,
8473                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8474                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8475                 I40E_INSET_LAST_ETHER_TYPE,
8476         };
8477
8478         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8479                 return 0;
8480         if (filter == RTE_ETH_FILTER_HASH)
8481                 valid = valid_hash_inset_table[pctype];
8482         else
8483                 valid = valid_fdir_inset_table[pctype];
8484
8485         return valid;
8486 }
8487
8488 /**
8489  * Validate if the input set is allowed for a specific PCTYPE
8490  */
8491 int
8492 i40e_validate_input_set(enum i40e_filter_pctype pctype,
8493                 enum rte_filter_type filter, uint64_t inset)
8494 {
8495         uint64_t valid;
8496
8497         valid = i40e_get_valid_input_set(pctype, filter);
8498         if (inset & (~valid))
8499                 return -EINVAL;
8500
8501         return 0;
8502 }
8503
8504 /* default input set fields combination per pctype */
8505 uint64_t
8506 i40e_get_default_input_set(uint16_t pctype)
8507 {
8508         static const uint64_t default_inset_table[] = {
8509                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8510                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8511                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8512                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8513                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8514                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8515                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8516                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8517                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8518                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8519                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8520                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8521                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8522                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8523                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8524                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8525                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8526                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8527                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8528                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8529                         I40E_INSET_SCTP_VT,
8530                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8531                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8532                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8533                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8534                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8535                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8536                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8537                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8538                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8539                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8540                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8541                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8542                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8543                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8544                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8545                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8546                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8547                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8548                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8549                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8550                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8551                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8552                         I40E_INSET_SCTP_VT,
8553                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8554                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8555                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8556                         I40E_INSET_LAST_ETHER_TYPE,
8557         };
8558
8559         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8560                 return 0;
8561
8562         return default_inset_table[pctype];
8563 }
8564
8565 /**
8566  * Parse the input set from field indexes into logical bit masks
8567  */
8568 static int
8569 i40e_parse_input_set(uint64_t *inset,
8570                      enum i40e_filter_pctype pctype,
8571                      enum rte_eth_input_set_field *field,
8572                      uint16_t size)
8573 {
8574         uint16_t i, j;
8575         int ret = -EINVAL;
8576
8577         static const struct {
8578                 enum rte_eth_input_set_field field;
8579                 uint64_t inset;
8580         } inset_convert_table[] = {
8581                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
8582                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
8583                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
8584                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
8585                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
8586                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
8587                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
8588                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
8589                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
8590                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
8591                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
8592                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
8593                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
8594                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
8595                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
8596                         I40E_INSET_IPV6_NEXT_HDR},
8597                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
8598                         I40E_INSET_IPV6_HOP_LIMIT},
8599                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
8600                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
8601                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
8602                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
8603                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
8604                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
8605                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
8606                         I40E_INSET_SCTP_VT},
8607                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
8608                         I40E_INSET_TUNNEL_DMAC},
8609                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
8610                         I40E_INSET_VLAN_TUNNEL},
8611                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
8612                         I40E_INSET_TUNNEL_ID},
8613                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
8614                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
8615                         I40E_INSET_FLEX_PAYLOAD_W1},
8616                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
8617                         I40E_INSET_FLEX_PAYLOAD_W2},
8618                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
8619                         I40E_INSET_FLEX_PAYLOAD_W3},
8620                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
8621                         I40E_INSET_FLEX_PAYLOAD_W4},
8622                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
8623                         I40E_INSET_FLEX_PAYLOAD_W5},
8624                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
8625                         I40E_INSET_FLEX_PAYLOAD_W6},
8626                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
8627                         I40E_INSET_FLEX_PAYLOAD_W7},
8628                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
8629                         I40E_INSET_FLEX_PAYLOAD_W8},
8630         };
8631
8632         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
8633                 return ret;
8634
8635         /* Only one item allowed for default or none */
8636         if (size == 1) {
8637                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
8638                         *inset = i40e_get_default_input_set(pctype);
8639                         return 0;
8640                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
8641                         *inset = I40E_INSET_NONE;
8642                         return 0;
8643                 }
8644         }
8645
8646         for (i = 0, *inset = 0; i < size; i++) {
8647                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
8648                         if (field[i] == inset_convert_table[j].field) {
8649                                 *inset |= inset_convert_table[j].inset;
8650                                 break;
8651                         }
8652                 }
8653
8654                 /* It contains unsupported input set, return immediately */
8655                 if (j == RTE_DIM(inset_convert_table))
8656                         return ret;
8657         }
8658
8659         return 0;
8660 }
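
/*
 * Usage sketch (illustrative): parse a two-field list (IPv4 src/dst
 * addresses) into a logical inset bit mask with the helper above.
 */
static int
example_parse_ipv4_pair(uint64_t *inset)
{
	enum rte_eth_input_set_field fields[] = {
		RTE_ETH_INPUT_SET_L3_SRC_IP4,
		RTE_ETH_INPUT_SET_L3_DST_IP4,
	};

	return i40e_parse_input_set(inset, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
				    fields, RTE_DIM(fields));
}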
8661
8662 /**
8663  * Translate the input set from logical bit masks to register-aware bit
8664  * masks
8665  */
8666 uint64_t
8667 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
8668 {
8669         uint64_t val = 0;
8670         uint16_t i;
8671
8672         struct inset_map {
8673                 uint64_t inset;
8674                 uint64_t inset_reg;
8675         };
8676
8677         static const struct inset_map inset_map_common[] = {
8678                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
8679                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
8680                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
8681                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
8682                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
8683                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
8684                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
8685                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
8686                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
8687                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
8688                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
8689                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
8690                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
8691                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
8692                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
8693                 {I40E_INSET_TUNNEL_DMAC,
8694                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
8695                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
8696                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
8697                 {I40E_INSET_TUNNEL_SRC_PORT,
8698                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
8699                 {I40E_INSET_TUNNEL_DST_PORT,
8700                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
8701                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
8702                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
8703                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
8704                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
8705                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
8706                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
8707                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
8708                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
8709                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
8710         };
8711
8712         /* Some registers are mapped differently on the X722 */
8713         static const struct inset_map inset_map_diff_x722[] = {
8714                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
8715                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
8716                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
8717                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
8718         };
8719
8720         static const struct inset_map inset_map_diff_not_x722[] = {
8721                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
8722                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
8723                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
8724                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
8725         };
8726
8727         if (input == 0)
8728                 return val;
8729
8730         /* Translate input set to register aware inset */
8731         if (type == I40E_MAC_X722) {
8732                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
8733                         if (input & inset_map_diff_x722[i].inset)
8734                                 val |= inset_map_diff_x722[i].inset_reg;
8735                 }
8736         } else {
8737                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
8738                         if (input & inset_map_diff_not_x722[i].inset)
8739                                 val |= inset_map_diff_not_x722[i].inset_reg;
8740                 }
8741         }
8742
8743         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
8744                 if (input & inset_map_common[i].inset)
8745                         val |= inset_map_common[i].inset_reg;
8746         }
8747
8748         return val;
8749 }
8750
8751 int
8752 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
8753 {
8754         uint8_t i, idx = 0;
8755         uint64_t inset_need_mask = inset;
8756
8757         static const struct {
8758                 uint64_t inset;
8759                 uint32_t mask;
8760         } inset_mask_map[] = {
8761                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
8762                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
8763                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
8764                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
8765                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
8766                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
8767                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
8768                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
8769         };
8770
8771         if (!inset || !mask || !nb_elem)
8772                 return 0;
8773
8774         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
8775                 /* Clear the inset bit if no mask register is required,
8776                  * for example proto + ttl together need none
8777                  */
8778                 if ((inset & inset_mask_map[i].inset) ==
8779                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
8780                         inset_need_mask &= ~inset_mask_map[i].inset;
8781                 if (!inset_need_mask)
8782                         return 0;
8783         }
8784         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
8785                 if ((inset_need_mask & inset_mask_map[i].inset) ==
8786                     inset_mask_map[i].inset) {
8787                         if (idx >= nb_elem) {
8788                                 PMD_DRV_LOG(ERR, "Exceeded the maximum number of bitmasks");
8789                                 return -EINVAL;
8790                         }
8791                         mask[idx] = inset_mask_map[i].mask;
8792                         idx++;
8793                 }
8794         }
8795
8796         return idx;
8797 }
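
/*
 * Worked example (illustrative): for inset = IPV4_TOS | IPV4_PROTO |
 * IPV4_TTL, the first loop matches the {PROTO | TTL, 0} entry and clears
 * both bits, because that combination needs no mask register. Only TOS
 * survives, so the second loop emits mask[0] = I40E_INSET_IPV4_TOS_MASK
 * and the function returns 1.
 */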
8798
8799 void
8800 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
8801 {
8802         uint32_t reg = i40e_read_rx_ctl(hw, addr);
8803
8804         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
8805         if (reg != val)
8806                 i40e_write_rx_ctl(hw, addr, val);
8807         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
8808                     (uint32_t)i40e_read_rx_ctl(hw, addr));
8809 }
8810
8811 static void
8812 i40e_filter_input_set_init(struct i40e_pf *pf)
8813 {
8814         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8815         enum i40e_filter_pctype pctype;
8816         uint64_t input_set, inset_reg;
8817         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8818         int num, i;
8819         uint16_t flow_type;
8820
8821         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
8822              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
8823                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
8824
8825                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
8826                         continue;
8827
8828                 input_set = i40e_get_default_input_set(pctype);
8829
8830                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8831                                                    I40E_INSET_MASK_NUM_REG);
8832                 if (num < 0)
8833                         return;
8834                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
8835                                         input_set);
8836
8837                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
8838                                       (uint32_t)(inset_reg & UINT32_MAX));
8839                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
8840                                      (uint32_t)((inset_reg >>
8841                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
8842                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
8843                                       (uint32_t)(inset_reg & UINT32_MAX));
8844                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
8845                                      (uint32_t)((inset_reg >>
8846                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
8847
8848                 for (i = 0; i < num; i++) {
8849                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8850                                              mask_reg[i]);
8851                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8852                                              mask_reg[i]);
8853                 }
8854                 /* Clear unused mask registers of the pctype */
8855                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
8856                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8857                                              0);
8858                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8859                                              0);
8860                 }
8861                 I40E_WRITE_FLUSH(hw);
8862
8863                 /* store the default input set */
8864                 pf->hash_input_set[pctype] = input_set;
8865                 pf->fdir.input_set[pctype] = input_set;
8866         }
8867 }
8868
8869 int
8870 i40e_hash_filter_inset_select(struct i40e_hw *hw,
8871                          struct rte_eth_input_set_conf *conf)
8872 {
8873         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8874         enum i40e_filter_pctype pctype;
8875         uint64_t input_set, inset_reg = 0;
8876         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8877         int ret, i, num;
8878
8879         if (!conf) {
8880                 PMD_DRV_LOG(ERR, "Invalid pointer");
8881                 return -EFAULT;
8882         }
8883         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
8884             conf->op != RTE_ETH_INPUT_SET_ADD) {
8885                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
8886                 return -EINVAL;
8887         }
8888
8889         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
8890         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
8891                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
8892                 return -EINVAL;
8893         }
8894
8895         if (hw->mac.type == I40E_MAC_X722) {
8896                 /* get translated pctype value in fd pctype register */
8897                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
8898                         I40E_GLQF_FD_PCTYPES((int)pctype));
8899         }
8900
8901         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
8902                                    conf->inset_size);
8903         if (ret) {
8904                 PMD_DRV_LOG(ERR, "Failed to parse input set");
8905                 return -EINVAL;
8906         }
8907
8908         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
8909                 /* get inset value in register */
8910                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
8911                 inset_reg <<= I40E_32_BIT_WIDTH;
8912                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
8913                 input_set |= pf->hash_input_set[pctype];
8914         }
8915         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8916                                            I40E_INSET_MASK_NUM_REG);
8917         if (num < 0)
8918                 return -EINVAL;
8919
8920         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
8921
8922         i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
8923                               (uint32_t)(inset_reg & UINT32_MAX));
8924         i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
8925                              (uint32_t)((inset_reg >>
8926                              I40E_32_BIT_WIDTH) & UINT32_MAX));
8927
8928         for (i = 0; i < num; i++)
8929                 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8930                                      mask_reg[i]);
8931         /* Clear unused mask registers of the pctype */
8932         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
8933                 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8934                                      0);
8935         I40E_WRITE_FLUSH(hw);
8936
8937         pf->hash_input_set[pctype] = input_set;
8938         return 0;
8939 }
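
/*
 * Hypothetical usage sketch, assuming the rte_eth_ctrl.h layout of
 * struct rte_eth_input_set_conf: select IPv4 src/dst addresses as the
 * only hash input for IPv4/UDP flows via the function above.
 */
static int
example_select_ipv4_hash_inset(struct i40e_hw *hw)
{
	struct rte_eth_input_set_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
	conf.inset_size = 2;
	conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
	conf.field[1] = RTE_ETH_INPUT_SET_L3_DST_IP4;
	conf.op = RTE_ETH_INPUT_SET_SELECT;

	return i40e_hash_filter_inset_select(hw, &conf);
}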
8940
8941 int
8942 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
8943                          struct rte_eth_input_set_conf *conf)
8944 {
8945         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8946         enum i40e_filter_pctype pctype;
8947         uint64_t input_set, inset_reg = 0;
8948         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8949         int ret, i, num;
8950
8951         if (!hw || !conf) {
8952                 PMD_DRV_LOG(ERR, "Invalid pointer");
8953                 return -EFAULT;
8954         }
8955         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
8956             conf->op != RTE_ETH_INPUT_SET_ADD) {
8957                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
8958                 return -EINVAL;
8959         }
8960
8961         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
8962
8963         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
8964                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
8965                 return -EINVAL;
8966         }
8967
8968         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
8969                                    conf->inset_size);
8970         if (ret) {
8971                 PMD_DRV_LOG(ERR, "Failed to parse input set");
8972                 return -EINVAL;
8973         }
8974
8975         /* get inset value in register */
8976         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
8977         inset_reg <<= I40E_32_BIT_WIDTH;
8978         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
8979
8980         /* Cannot change the inset reg for flex payload for fdir;
8981          * it is done by writing I40E_PRTQF_FD_FLXINSET
8982          * in i40e_set_flex_mask_on_pctype.
8983          */
8984         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
8985                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
8986         else
8987                 input_set |= pf->fdir.input_set[pctype];
8988         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8989                                            I40E_INSET_MASK_NUM_REG);
8990         if (num < 0)
8991                 return -EINVAL;
8992
8993         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
8994
8995         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
8996                               (uint32_t)(inset_reg & UINT32_MAX));
8997         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
8998                              (uint32_t)((inset_reg >>
8999                              I40E_32_BIT_WIDTH) & UINT32_MAX));
9000
9001         for (i = 0; i < num; i++)
9002                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
9003                                      mask_reg[i]);
9004         /* Clear unused mask registers of the pctype */
9005         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9006                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
9007                                      0);
9008         I40E_WRITE_FLUSH(hw);
9009
9010         pf->fdir.input_set[pctype] = input_set;
9011         return 0;
9012 }
9013
9014 static int
9015 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9016 {
9017         int ret = 0;
9018
9019         if (!hw || !info) {
9020                 PMD_DRV_LOG(ERR, "Invalid pointer");
9021                 return -EFAULT;
9022         }
9023
9024         switch (info->info_type) {
9025         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9026                 i40e_get_symmetric_hash_enable_per_port(hw,
9027                                         &(info->info.enable));
9028                 break;
9029         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9030                 ret = i40e_get_hash_filter_global_config(hw,
9031                                 &(info->info.global_conf));
9032                 break;
9033         default:
9034                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9035                                                         info->info_type);
9036                 ret = -EINVAL;
9037                 break;
9038         }
9039
9040         return ret;
9041 }
9042
9043 static int
9044 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9045 {
9046         int ret = 0;
9047
9048         if (!hw || !info) {
9049                 PMD_DRV_LOG(ERR, "Invalid pointer");
9050                 return -EFAULT;
9051         }
9052
9053         switch (info->info_type) {
9054         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9055                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
9056                 break;
9057         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9058                 ret = i40e_set_hash_filter_global_config(hw,
9059                                 &(info->info.global_conf));
9060                 break;
9061         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
9062                 ret = i40e_hash_filter_inset_select(hw,
9063                                                &(info->info.input_set_conf));
9064                 break;
9065
9066         default:
9067                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9068                                                         info->info_type);
9069                 ret = -EINVAL;
9070                 break;
9071         }
9072
9073         return ret;
9074 }
9075
9076 /* Operations for hash function */
9077 static int
9078 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
9079                       enum rte_filter_op filter_op,
9080                       void *arg)
9081 {
9082         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9083         int ret = 0;
9084
9085         switch (filter_op) {
9086         case RTE_ETH_FILTER_NOP:
9087                 break;
9088         case RTE_ETH_FILTER_GET:
9089                 ret = i40e_hash_filter_get(hw,
9090                         (struct rte_eth_hash_filter_info *)arg);
9091                 break;
9092         case RTE_ETH_FILTER_SET:
9093                 ret = i40e_hash_filter_set(hw,
9094                         (struct rte_eth_hash_filter_info *)arg);
9095                 break;
9096         default:
9097                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
9098                                                                 filter_op);
9099                 ret = -ENOTSUP;
9100                 break;
9101         }
9102
9103         return ret;
9104 }
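
/*
 * Illustrative usage sketch (not part of the driver): an application
 * normally reaches i40e_hash_filter_ctrl() through the legacy filter API.
 * The port id and the choice of enabling symmetric hashing below are
 * assumptions made for the example only.
 *
 *      struct rte_eth_hash_filter_info info;
 *
 *      memset(&info, 0, sizeof(info));
 *      info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
 *      info.info.enable = 1;
 *      ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *                                    RTE_ETH_FILTER_SET, &info);
 */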
9105
9106 /* Convert ethertype filter structure */
9107 static int
9108 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9109                               struct i40e_ethertype_filter *filter)
9110 {
9111         rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
9112         filter->input.ether_type = input->ether_type;
9113         filter->flags = input->flags;
9114         filter->queue = input->queue;
9115
9116         return 0;
9117 }
9118
9119 /* Check if the ethertype filter already exists */
9120 struct i40e_ethertype_filter *
9121 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9122                                 const struct i40e_ethertype_filter_input *input)
9123 {
9124         int ret;
9125
9126         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9127         if (ret < 0)
9128                 return NULL;
9129
9130         return ethertype_rule->hash_map[ret];
9131 }
9132
9133 /* Add ethertype filter in SW list */
9134 static int
9135 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9136                                 struct i40e_ethertype_filter *filter)
9137 {
9138         struct i40e_ethertype_rule *rule = &pf->ethertype;
9139         int ret;
9140
9141         ret = rte_hash_add_key(rule->hash_table, &filter->input);
9142         if (ret < 0) {
9143                 PMD_DRV_LOG(ERR,
9144                             "Failed to insert ethertype filter"
9145                             " into hash table, error %d!",
9146                             ret);
9147                 return ret;
9148         }
9149         rule->hash_map[ret] = filter;
9150
9151         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9152
9153         return 0;
9154 }
9155
9156 /* Delete ethertype filter in SW list */
9157 int
9158 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9159                              struct i40e_ethertype_filter_input *input)
9160 {
9161         struct i40e_ethertype_rule *rule = &pf->ethertype;
9162         struct i40e_ethertype_filter *filter;
9163         int ret;
9164
9165         ret = rte_hash_del_key(rule->hash_table, input);
9166         if (ret < 0) {
9167                 PMD_DRV_LOG(ERR,
9168                             "Failed to delete ethertype filter"
9169                             " from hash table, error %d!",
9170                             ret);
9171                 return ret;
9172         }
9173         filter = rule->hash_map[ret];
9174         rule->hash_map[ret] = NULL;
9175
9176         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9177         rte_free(filter);
9178
9179         return 0;
9180 }
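
/*
 * Note on the SW list helpers above: on success rte_hash_add_key(),
 * rte_hash_lookup() and rte_hash_del_key() all return the key's position
 * in the hash table, and that position stays stable until the key is
 * deleted. This is why hash_map[] can be a flat array indexed by the
 * returned position, giving O(1) retrieval of the filter object.
 */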
9181
9182 /*
9183  * Configure an ethertype filter, which can direct packets by filtering
9184  * on MAC address plus ether_type, or on ether_type alone
9185  */
9186 int
9187 i40e_ethertype_filter_set(struct i40e_pf *pf,
9188                         struct rte_eth_ethertype_filter *filter,
9189                         bool add)
9190 {
9191         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9192         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9193         struct i40e_ethertype_filter *ethertype_filter, *node;
9194         struct i40e_ethertype_filter check_filter;
9195         struct i40e_control_filter_stats stats;
9196         uint16_t flags = 0;
9197         int ret;
9198
9199         if (filter->queue >= pf->dev_data->nb_rx_queues) {
9200                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9201                 return -EINVAL;
9202         }
9203         if (filter->ether_type == ETHER_TYPE_IPv4 ||
9204                 filter->ether_type == ETHER_TYPE_IPv6) {
9205                 PMD_DRV_LOG(ERR,
9206                         "unsupported ether_type(0x%04x) in control packet filter.",
9207                         filter->ether_type);
9208                 return -EINVAL;
9209         }
9210         if (filter->ether_type == ETHER_TYPE_VLAN)
9211                 PMD_DRV_LOG(WARNING,
9212                         "filtering on the VLAN ether_type in the first tag is not supported.");
9213
9214         /* Check if the filter already exists in the SW list */
9215         memset(&check_filter, 0, sizeof(check_filter));
9216         i40e_ethertype_filter_convert(filter, &check_filter);
9217         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9218                                                &check_filter.input);
9219         if (add && node) {
9220                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9221                 return -EINVAL;
9222         }
9223
9224         if (!add && !node) {
9225                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9226                 return -EINVAL;
9227         }
9228
9229         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9230                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9231         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9232                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9233         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9234
9235         memset(&stats, 0, sizeof(stats));
9236         ret = i40e_aq_add_rem_control_packet_filter(hw,
9237                         filter->mac_addr.addr_bytes,
9238                         filter->ether_type, flags,
9239                         pf->main_vsi->seid,
9240                         filter->queue, add, &stats, NULL);
9241
9242         PMD_DRV_LOG(INFO,
9243                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9244                 ret, stats.mac_etype_used, stats.etype_used,
9245                 stats.mac_etype_free, stats.etype_free);
9246         if (ret < 0)
9247                 return -ENOSYS;
9248
9249         /* Add or delete a filter in SW list */
9250         if (add) {
9251                 ethertype_filter = rte_zmalloc("ethertype_filter",
9252                                        sizeof(*ethertype_filter), 0);
                if (ethertype_filter == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to alloc memory.");
                        return -ENOMEM;
                }
9253                 rte_memcpy(ethertype_filter, &check_filter,
9254                            sizeof(check_filter));
9255                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9256         } else {
9257                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9258         }
9259
9260         return ret;
9261 }
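
/*
 * Illustrative usage sketch (not part of the driver): adding a drop rule
 * for one ether_type through the legacy filter API. The port id and the
 * ether_type value (IEEE 1588 over Ethernet) are assumptions made for the
 * example only; queue must still name a valid RX queue even for a drop
 * rule, as checked at the top of i40e_ethertype_filter_set().
 *
 *      struct rte_eth_ethertype_filter efilter;
 *
 *      memset(&efilter, 0, sizeof(efilter));
 *      efilter.ether_type = 0x88F7;
 *      efilter.flags = RTE_ETHTYPE_FLAGS_DROP;
 *      efilter.queue = 0;
 *      ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *                                    RTE_ETH_FILTER_ADD, &efilter);
 */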
9262
9263 /*
9264  * Handle operations for ethertype filter.
9265  */
9266 static int
9267 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
9268                                 enum rte_filter_op filter_op,
9269                                 void *arg)
9270 {
9271         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9272         int ret = 0;
9273
9274         if (filter_op == RTE_ETH_FILTER_NOP)
9275                 return ret;
9276
9277         if (arg == NULL) {
9278                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
9279                             filter_op);
9280                 return -EINVAL;
9281         }
9282
9283         switch (filter_op) {
9284         case RTE_ETH_FILTER_ADD:
9285                 ret = i40e_ethertype_filter_set(pf,
9286                         (struct rte_eth_ethertype_filter *)arg,
9287                         TRUE);
9288                 break;
9289         case RTE_ETH_FILTER_DELETE:
9290                 ret = i40e_ethertype_filter_set(pf,
9291                         (struct rte_eth_ethertype_filter *)arg,
9292                         FALSE);
9293                 break;
9294         default:
9295                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
9296                 ret = -ENOSYS;
9297                 break;
9298         }
9299         return ret;
9300 }
9301
9302 static int
9303 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
9304                      enum rte_filter_type filter_type,
9305                      enum rte_filter_op filter_op,
9306                      void *arg)
9307 {
9308         int ret = 0;
9309
9310         if (dev == NULL)
9311                 return -EINVAL;
9312
9313         switch (filter_type) {
9314         case RTE_ETH_FILTER_NONE:
9315                 /* For global configuration */
9316                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
9317                 break;
9318         case RTE_ETH_FILTER_HASH:
9319                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
9320                 break;
9321         case RTE_ETH_FILTER_MACVLAN:
9322                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
9323                 break;
9324         case RTE_ETH_FILTER_ETHERTYPE:
9325                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
9326                 break;
9327         case RTE_ETH_FILTER_TUNNEL:
9328                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
9329                 break;
9330         case RTE_ETH_FILTER_FDIR:
9331                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
9332                 break;
9333         case RTE_ETH_FILTER_GENERIC:
9334                 if (filter_op != RTE_ETH_FILTER_GET)
9335                         return -EINVAL;
9336                 *(const void **)arg = &i40e_flow_ops;
9337                 break;
9338         default:
9339                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9340                                                         filter_type);
9341                 ret = -EINVAL;
9342                 break;
9343         }
9344
9345         return ret;
9346 }
9347
9348 /*
9349  * Check and enable Extended Tag.
9350  * Enabling Extended Tag is important for 40G performance.
9351  */
9352 static void
9353 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9354 {
9355         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9356         uint32_t buf = 0;
9357         int ret;
9358
9359         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9360                                       PCI_DEV_CAP_REG);
9361         if (ret < 0) {
9362                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9363                             PCI_DEV_CAP_REG);
9364                 return;
9365         }
9366         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9367                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9368                 return;
9369         }
9370
9371         buf = 0;
9372         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9373                                       PCI_DEV_CTRL_REG);
9374         if (ret < 0) {
9375                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9376                             PCI_DEV_CTRL_REG);
9377                 return;
9378         }
9379         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9380                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9381                 return;
9382         }
9383         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9384         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9385                                        PCI_DEV_CTRL_REG);
9386         if (ret < 0) {
9387                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9388                             PCI_DEV_CTRL_REG);
9389                 return;
9390         }
9391 }
9392
9393 /*
9394  * As some registers are not reset without a global hardware reset,
9395  * hardware initialization is needed to put those registers into an
9396  * expected initial state.
9397  */
9398 static void
9399 i40e_hw_init(struct rte_eth_dev *dev)
9400 {
9401         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9402
9403         i40e_enable_extended_tag(dev);
9404
9405         /* clear the PF Queue Filter control register */
9406         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9407
9408         /* Disable symmetric hash per port */
9409         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9410 }
9411
9412 /*
9413  * For X722 it is possible to have multiple pctypes mapped to the same
9414  * flowtype; however, this function will return only the highest pctype
9415  * index, which is not quite correct. This is a known problem of the i40e
9416  * driver and needs to be fixed later.
9417  */
9418 enum i40e_filter_pctype
9419 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9420 {
9421         int i;
9422         uint64_t pctype_mask;
9423
9424         if (flow_type < I40E_FLOW_TYPE_MAX) {
9425                 pctype_mask = adapter->pctypes_tbl[flow_type];
9426                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9427                         if (pctype_mask & (1ULL << i))
9428                                 return (enum i40e_filter_pctype)i;
9429                 }
9430         }
9431         return I40E_FILTER_PCTYPE_INVALID;
9432 }
9433
9434 uint16_t
9435 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9436                         enum i40e_filter_pctype pctype)
9437 {
9438         uint16_t flowtype;
9439         uint64_t pctype_mask = 1ULL << pctype;
9440
9441         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9442              flowtype++) {
9443                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9444                         return flowtype;
9445         }
9446
9447         return RTE_ETH_FLOW_UNKNOWN;
9448 }
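
/*
 * Example of the asymmetry noted above (illustrative only): a flow type
 * converted to a pctype and back yields the original flow type, but on
 * X722 the intermediate pctype is merely the highest-numbered pctype
 * enabled for that flow type in adapter->pctypes_tbl[].
 *
 *      pctype = i40e_flowtype_to_pctype(adapter,
 *                                       RTE_ETH_FLOW_NONFRAG_IPV4_UDP);
 *      flowtype = i40e_pctype_to_flowtype(adapter, pctype);
 *      // flowtype == RTE_ETH_FLOW_NONFRAG_IPV4_UDP again
 */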
9449
9450 /*
9451  * On X710, performance falls far short of expectations on recent firmware
9452  * versions. The same is true on XL710 if promiscuous mode is disabled, or
9453  * if promiscuous mode is enabled and the port MAC address equals the packet
9454  * destination MAC address. The fix for this issue may not be integrated in
9455  * the following firmware version, so a workaround in the software driver is
9456  * needed. It modifies the initial values of 3 internal-only registers on
9457  * both X710 and XL710. Note that the values for X710 and XL710 may differ,
9458  * and the workaround can be removed once the issue is fixed in firmware in
9459  * the future.
9460  */
9461
9462 /* For both X710 and XL710 */
9463 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
9464 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
9465 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
9466
9467 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9468 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
9469
9470 /* For X722 */
9471 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
9472 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
9473
9474 /* For X710 */
9475 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
9476 /* For XL710 */
9477 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
9478 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
9479
9480 static int
9481 i40e_dev_sync_phy_type(struct i40e_hw *hw)
9482 {
9483         enum i40e_status_code status;
9484         struct i40e_aq_get_phy_abilities_resp phy_ab;
9485         int ret = -ENOTSUP;
9486         int retries = 0;
9487
9488         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
9489                                               NULL);
9490
9491         while (status) {
9492                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
9493                         status);
9494                 retries++;
9495                 rte_delay_us(100000);
9496                 if (retries < 5)
9497                         status = i40e_aq_get_phy_capabilities(hw, false,
9498                                         true, &phy_ab, NULL);
9499                 else
9500                         return ret;
9501         }
9502         return 0;
9503 }
9504
9505 static void
9506 i40e_configure_registers(struct i40e_hw *hw)
9507 {
9508         static struct {
9509                 uint32_t addr;
9510                 uint64_t val;
9511         } reg_table[] = {
9512                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
9513                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
9514                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
9515         };
9516         uint64_t reg;
9517         uint32_t i;
9518         int ret;
9519
9520         for (i = 0; i < RTE_DIM(reg_table); i++) {
9521                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
9522                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9523                                 reg_table[i].val =
9524                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
9525                         else /* For X710/XL710/XXV710 */
9526                                 if (hw->aq.fw_maj_ver < 6)
9527                                         reg_table[i].val =
9528                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
9529                                 else
9530                                         reg_table[i].val =
9531                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
9532                 }
9533
9534                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
9535                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9536                                 reg_table[i].val =
9537                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9538                         else /* For X710/XL710/XXV710 */
9539                                 reg_table[i].val =
9540                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9541                 }
9542
9543                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
9544                         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
9545                             I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
9546                                 reg_table[i].val =
9547                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
9548                         else /* For X710 */
9549                                 reg_table[i].val =
9550                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
9551                 }
9552
9553                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
9554                                                         &reg, NULL);
9555                 if (ret < 0) {
9556                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
9557                                                         reg_table[i].addr);
9558                         break;
9559                 }
9560                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
9561                                                 reg_table[i].addr, reg);
9562                 if (reg == reg_table[i].val)
9563                         continue;
9564
9565                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
9566                                                 reg_table[i].val, NULL);
9567                 if (ret < 0) {
9568                         PMD_DRV_LOG(ERR,
9569                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
9570                                 reg_table[i].val, reg_table[i].addr);
9571                         break;
9572                 }
9573                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
9574                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
9575         }
9576 }
9577
9578 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
9579 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
9580 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
9581 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
9582 static int
9583 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
9584 {
9585         uint32_t reg;
9586         int ret;
9587
9588         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
9589                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
9590                 return -EINVAL;
9591         }
9592
9593         /* Configure for double VLAN RX stripping */
9594         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
9595         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
9596                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
9597                 ret = i40e_aq_debug_write_register(hw,
9598                                                    I40E_VSI_TSR(vsi->vsi_id),
9599                                                    reg, NULL);
9600                 if (ret < 0) {
9601                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
9602                                     vsi->vsi_id);
9603                         return I40E_ERR_CONFIG;
9604                 }
9605         }
9606
9607         /* Configure for double VLAN TX insertion */
9608         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
9609         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
9610                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
9611                 ret = i40e_aq_debug_write_register(hw,
9612                                                    I40E_VSI_L2TAGSTXVALID(
9613                                                    vsi->vsi_id), reg, NULL);
9614                 if (ret < 0) {
9615                         PMD_DRV_LOG(ERR,
9616                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
9617                                 vsi->vsi_id);
9618                         return I40E_ERR_CONFIG;
9619                 }
9620         }
9621
9622         return 0;
9623 }
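
/*
 * Illustrative sketch (not part of the driver): double VLAN (QinQ)
 * handling is exposed to applications through the standard VLAN offload
 * API; the port id below is an assumption made for the example only.
 *
 *      int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *      rte_eth_dev_set_vlan_offload(port_id,
 *                                   mask | ETH_VLAN_EXTEND_OFFLOAD);
 */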
9624
9625 /**
9626  * i40e_aq_add_mirror_rule
9627  * @hw: pointer to the hardware structure
9628  * @seid: VEB seid to add the mirror rule to
9629  * @dst_id: destination VSI seid
9630  * @rule_type: type of the mirror rule to be added
9631  * @entries: buffer which contains the entities to be mirrored
9632  * @count: number of entities contained in the buffer
9633  * @rule_id: the rule_id of the rule to be added
9634  *
9635  * Add a mirror rule for a given VEB.
9636  **/
9637 static enum i40e_status_code
9638 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
9639                         uint16_t seid, uint16_t dst_id,
9640                         uint16_t rule_type, uint16_t *entries,
9641                         uint16_t count, uint16_t *rule_id)
9642 {
9643         struct i40e_aq_desc desc;
9644         struct i40e_aqc_add_delete_mirror_rule cmd;
9645         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
9646                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
9647                 &desc.params.raw;
9648         uint16_t buff_len;
9649         enum i40e_status_code status;
9650
9651         i40e_fill_default_direct_cmd_desc(&desc,
9652                                           i40e_aqc_opc_add_mirror_rule);
9653         memset(&cmd, 0, sizeof(cmd));
9654
9655         buff_len = sizeof(uint16_t) * count;
9656         desc.datalen = rte_cpu_to_le_16(buff_len);
9657         if (buff_len > 0)
9658                 desc.flags |= rte_cpu_to_le_16(
9659                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
9660         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
9661                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
9662         cmd.num_entries = rte_cpu_to_le_16(count);
9663         cmd.seid = rte_cpu_to_le_16(seid);
9664         cmd.destination = rte_cpu_to_le_16(dst_id);
9665
9666         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
9667         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
9668         PMD_DRV_LOG(INFO,
9669                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
9670                 hw->aq.asq_last_status, resp->rule_id,
9671                 resp->mirror_rules_used, resp->mirror_rules_free);
9672         *rule_id = rte_le_to_cpu_16(resp->rule_id);
9673
9674         return status;
9675 }
9676
9677 /**
9678  * i40e_aq_del_mirror_rule
9679  * @hw: pointer to the hardware structure
9680  * @seid: VEB seid to delete the mirror rule from
9681  * @rule_type: type of the mirror rule to be deleted
9682  * @entries: buffer which contains the entities to be mirrored
9683  * @count: number of entities contained in the buffer
9684  * @rule_id: the rule_id of the rule to be deleted
9685  *
9686  * Delete a mirror rule for a given VEB.
9687  **/
9688 static enum i40e_status_code
9689 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
9690                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
9691                 uint16_t count, uint16_t rule_id)
9692 {
9693         struct i40e_aq_desc desc;
9694         struct i40e_aqc_add_delete_mirror_rule cmd;
9695         uint16_t buff_len = 0;
9696         enum i40e_status_code status;
9697         void *buff = NULL;
9698
9699         i40e_fill_default_direct_cmd_desc(&desc,
9700                                           i40e_aqc_opc_delete_mirror_rule);
9701         memset(&cmd, 0, sizeof(cmd));
9702         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
9703                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
9704                                                           I40E_AQ_FLAG_RD));
9705                 cmd.num_entries = count;
9706                 buff_len = sizeof(uint16_t) * count;
9707                 desc.datalen = rte_cpu_to_le_16(buff_len);
9708                 buff = (void *)entries;
9709         } else
9710                 /* rule_id is filled in the destination field when deleting a mirror rule */
9711                 cmd.destination = rte_cpu_to_le_16(rule_id);
9712
9713         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
9714                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
9715         cmd.seid = rte_cpu_to_le_16(seid);
9716
9717         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
9718         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
9719
9720         return status;
9721 }
9722
9723 /**
9724  * i40e_mirror_rule_set
9725  * @dev: pointer to the device structure
9726  * @mirror_conf: mirror rule info
9727  * @sw_id: mirror rule's sw_id
9728  * @on: enable/disable
9729  *
9730  * Set a mirror rule.
9731  *
9732  **/
9733 static int
9734 i40e_mirror_rule_set(struct rte_eth_dev *dev,
9735                         struct rte_eth_mirror_conf *mirror_conf,
9736                         uint8_t sw_id, uint8_t on)
9737 {
9738         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9739         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9740         struct i40e_mirror_rule *it, *mirr_rule = NULL;
9741         struct i40e_mirror_rule *parent = NULL;
9742         uint16_t seid, dst_seid, rule_id;
9743         uint16_t i, j = 0;
9744         int ret;
9745
9746         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
9747
9748         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
9749                 PMD_DRV_LOG(ERR,
9750                         "mirror rule cannot be configured without a VEB or VFs.");
9751                 return -ENOSYS;
9752         }
9753         if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
9754                 PMD_DRV_LOG(ERR, "mirror table is full.");
9755                 return -ENOSPC;
9756         }
9757         if (mirror_conf->dst_pool > pf->vf_num) {
9758                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
9759                                  mirror_conf->dst_pool);
9760                 return -EINVAL;
9761         }
9762
9763         seid = pf->main_vsi->veb->seid;
9764
9765         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
9766                 if (sw_id <= it->index) {
9767                         mirr_rule = it;
9768                         break;
9769                 }
9770                 parent = it;
9771         }
9772         if (mirr_rule && sw_id == mirr_rule->index) {
9773                 if (on) {
9774                         PMD_DRV_LOG(ERR, "mirror rule exists.");
9775                         return -EEXIST;
9776                 } else {
9777                         ret = i40e_aq_del_mirror_rule(hw, seid,
9778                                         mirr_rule->rule_type,
9779                                         mirr_rule->entries,
9780                                         mirr_rule->num_entries, mirr_rule->id);
9781                         if (ret < 0) {
9782                                 PMD_DRV_LOG(ERR,
9783                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
9784                                         ret, hw->aq.asq_last_status);
9785                                 return -ENOSYS;
9786                         }
9787                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
9788                         rte_free(mirr_rule);
9789                         pf->nb_mirror_rule--;
9790                         return 0;
9791                 }
9792         } else if (!on) {
9793                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
9794                 return -ENOENT;
9795         }
9796
9797         mirr_rule = rte_zmalloc("i40e_mirror_rule",
9798                                 sizeof(struct i40e_mirror_rule), 0);
9799         if (!mirr_rule) {
9800                 PMD_DRV_LOG(ERR, "failed to allocate memory");
9801                 return I40E_ERR_NO_MEMORY;
9802         }
9803         switch (mirror_conf->rule_type) {
9804         case ETH_MIRROR_VLAN:
9805                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
9806                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
9807                                 mirr_rule->entries[j] =
9808                                         mirror_conf->vlan.vlan_id[i];
9809                                 j++;
9810                         }
9811                 }
9812                 if (j == 0) {
9813                         PMD_DRV_LOG(ERR, "vlan is not specified.");
9814                         rte_free(mirr_rule);
9815                         return -EINVAL;
9816                 }
9817                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
9818                 break;
9819         case ETH_MIRROR_VIRTUAL_POOL_UP:
9820         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
9821                 /* check if the specified pool bit is out of range */
9822                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
9823                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
9824                         rte_free(mirr_rule);
9825                         return -EINVAL;
9826                 }
9827                 for (i = 0, j = 0; i < pf->vf_num; i++) {
9828                         if (mirror_conf->pool_mask & (1ULL << i)) {
9829                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
9830                                 j++;
9831                         }
9832                 }
9833                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
9834                         /* add pf vsi to entries */
9835                         mirr_rule->entries[j] = pf->main_vsi_seid;
9836                         j++;
9837                 }
9838                 if (j == 0) {
9839                         PMD_DRV_LOG(ERR, "pool is not specified.");
9840                         rte_free(mirr_rule);
9841                         return -EINVAL;
9842                 }
9843                 /* egress and ingress in AQ commands mean from the switch, not the port */
9844                 mirr_rule->rule_type =
9845                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
9846                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
9847                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
9848                 break;
9849         case ETH_MIRROR_UPLINK_PORT:
9850                 /* egress and ingress in AQ commands mean from the switch, not the port */
9851                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
9852                 break;
9853         case ETH_MIRROR_DOWNLINK_PORT:
9854                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
9855                 break;
9856         default:
9857                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
9858                         mirror_conf->rule_type);
9859                 rte_free(mirr_rule);
9860                 return -EINVAL;
9861         }
9862
9863         /* If the dst_pool is equal to vf_num, consider it as PF */
9864         if (mirror_conf->dst_pool == pf->vf_num)
9865                 dst_seid = pf->main_vsi_seid;
9866         else
9867                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
9868
9869         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
9870                                       mirr_rule->rule_type, mirr_rule->entries,
9871                                       j, &rule_id);
9872         if (ret < 0) {
9873                 PMD_DRV_LOG(ERR,
9874                         "failed to add mirror rule: ret = %d, aq_err = %d.",
9875                         ret, hw->aq.asq_last_status);
9876                 rte_free(mirr_rule);
9877                 return -ENOSYS;
9878         }
9879
9880         mirr_rule->index = sw_id;
9881         mirr_rule->num_entries = j;
9882         mirr_rule->id = rule_id;
9883         mirr_rule->dst_vsi_seid = dst_seid;
9884
9885         if (parent)
9886                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
9887         else
9888                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
9889
9890         pf->nb_mirror_rule++;
9891         return 0;
9892 }
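
/*
 * Illustrative usage sketch (not part of the driver): applications reach
 * i40e_mirror_rule_set() through rte_eth_mirror_rule_set(). The port id,
 * rule id, VLAN id and destination pool below are assumptions made for
 * the example only; the final argument turns the rule on (1) or off (0).
 *
 *      struct rte_eth_mirror_conf mirror_conf;
 *
 *      memset(&mirror_conf, 0, sizeof(mirror_conf));
 *      mirror_conf.rule_type = ETH_MIRROR_VLAN;
 *      mirror_conf.vlan.vlan_mask = 1ULL << 0;
 *      mirror_conf.vlan.vlan_id[0] = 100;
 *      mirror_conf.dst_pool = 0;
 *      ret = rte_eth_mirror_rule_set(port_id, &mirror_conf, rule_id, 1);
 */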
9893
9894 /**
9895  * i40e_mirror_rule_reset
9896  * @dev: pointer to the device
9897  * @sw_id: mirror rule's sw_id
9898  *
9899  * Reset a mirror rule.
9900  *
9901  **/
9902 static int
9903 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
9904 {
9905         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9906         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9907         struct i40e_mirror_rule *it, *mirr_rule = NULL;
9908         uint16_t seid;
9909         int ret;
9910
9911         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
9912
9913         seid = pf->main_vsi->veb->seid;
9914
9915         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
9916                 if (sw_id == it->index) {
9917                         mirr_rule = it;
9918                         break;
9919                 }
9920         }
9921         if (mirr_rule) {
9922                 ret = i40e_aq_del_mirror_rule(hw, seid,
9923                                 mirr_rule->rule_type,
9924                                 mirr_rule->entries,
9925                                 mirr_rule->num_entries, mirr_rule->id);
9926                 if (ret < 0) {
9927                         PMD_DRV_LOG(ERR,
9928                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
9929                                 ret, hw->aq.asq_last_status);
9930                         return -ENOSYS;
9931                 }
9932                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
9933                 rte_free(mirr_rule);
9934                 pf->nb_mirror_rule--;
9935         } else {
9936                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
9937                 return -ENOENT;
9938         }
9939         return 0;
9940 }
9941
9942 static uint64_t
9943 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
9944 {
9945         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9946         uint64_t systim_cycles;
9947
9948         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
9949         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
9950                         << 32;
9951
9952         return systim_cycles;
9953 }
9954
9955 static uint64_t
9956 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
9957 {
9958         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9959         uint64_t rx_tstamp;
9960
9961         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
9962         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
9963                         << 32;
9964
9965         return rx_tstamp;
9966 }
9967
9968 static uint64_t
9969 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
9970 {
9971         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9972         uint64_t tx_tstamp;
9973
9974         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
9975         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
9976                         << 32;
9977
9978         return tx_tstamp;
9979 }
9980
9981 static void
9982 i40e_start_timecounters(struct rte_eth_dev *dev)
9983 {
9984         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9985         struct i40e_adapter *adapter =
9986                         (struct i40e_adapter *)dev->data->dev_private;
9987         struct rte_eth_link link;
9988         uint32_t tsync_inc_l;
9989         uint32_t tsync_inc_h;
9990
9991         /* Get current link speed. */
9992         memset(&link, 0, sizeof(link));
9993         i40e_dev_link_update(dev, 1);
9994         rte_i40e_dev_atomic_read_link_status(dev, &link);
9995
9996         switch (link.link_speed) {
9997         case ETH_SPEED_NUM_40G:
9998                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
9999                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10000                 break;
10001         case ETH_SPEED_NUM_10G:
10002                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10003                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10004                 break;
10005         case ETH_SPEED_NUM_1G:
10006                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10007                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10008                 break;
10009         default:
10010                 tsync_inc_l = 0x0;
10011                 tsync_inc_h = 0x0;
10012         }
10013
10014         /* Set the timesync increment value. */
10015         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10016         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10017
10018         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10019         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10020         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10021
10022         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10023         adapter->systime_tc.cc_shift = 0;
10024         adapter->systime_tc.nsec_mask = 0;
10025
10026         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10027         adapter->rx_tstamp_tc.cc_shift = 0;
10028         adapter->rx_tstamp_tc.nsec_mask = 0;
10029
10030         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10031         adapter->tx_tstamp_tc.cc_shift = 0;
10032         adapter->tx_tstamp_tc.nsec_mask = 0;
10033 }
10034
10035 static int
10036 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10037 {
10038         struct i40e_adapter *adapter =
10039                         (struct i40e_adapter *)dev->data->dev_private;
10040
10041         adapter->systime_tc.nsec += delta;
10042         adapter->rx_tstamp_tc.nsec += delta;
10043         adapter->tx_tstamp_tc.nsec += delta;
10044
10045         return 0;
10046 }
10047
10048 static int
10049 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10050 {
10051         uint64_t ns;
10052         struct i40e_adapter *adapter =
10053                         (struct i40e_adapter *)dev->data->dev_private;
10054
10055         ns = rte_timespec_to_ns(ts);
10056
10057         /* Set the timecounters to a new value. */
10058         adapter->systime_tc.nsec = ns;
10059         adapter->rx_tstamp_tc.nsec = ns;
10060         adapter->tx_tstamp_tc.nsec = ns;
10061
10062         return 0;
10063 }
10064
10065 static int
10066 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10067 {
10068         uint64_t ns, systime_cycles;
10069         struct i40e_adapter *adapter =
10070                         (struct i40e_adapter *)dev->data->dev_private;
10071
10072         systime_cycles = i40e_read_systime_cyclecounter(dev);
10073         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10074         *ts = rte_ns_to_timespec(ns);
10075
10076         return 0;
10077 }
10078
10079 static int
10080 i40e_timesync_enable(struct rte_eth_dev *dev)
10081 {
10082         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10083         uint32_t tsync_ctl_l;
10084         uint32_t tsync_ctl_h;
10085
10086         /* Stop the timesync system time. */
10087         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10088         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10089         /* Reset the timesync system time value. */
10090         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10091         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10092
10093         i40e_start_timecounters(dev);
10094
10095         /* Clear timesync registers. */
10096         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10097         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10098         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10099         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10100         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10101         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10102
10103         /* Enable timestamping of PTP packets. */
10104         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10105         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10106
10107         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10108         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10109         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10110
10111         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10112         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10113
10114         return 0;
10115 }
10116
10117 static int
10118 i40e_timesync_disable(struct rte_eth_dev *dev)
10119 {
10120         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10121         uint32_t tsync_ctl_l;
10122         uint32_t tsync_ctl_h;
10123
10124         /* Disable timestamping of transmitted PTP packets. */
10125         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10126         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10127
10128         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10129         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10130
10131         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10132         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10133
10134         /* Reset the timesync increment value. */
10135         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10136         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10137
10138         return 0;
10139 }
10140
10141 static int
10142 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10143                                 struct timespec *timestamp, uint32_t flags)
10144 {
10145         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10146         struct i40e_adapter *adapter =
10147                 (struct i40e_adapter *)dev->data->dev_private;
10148
10149         uint32_t sync_status;
10150         uint32_t index = flags & 0x03;
10151         uint64_t rx_tstamp_cycles;
10152         uint64_t ns;
10153
10154         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10155         if ((sync_status & (1 << index)) == 0)
10156                 return -EINVAL;
10157
10158         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10159         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10160         *timestamp = rte_ns_to_timespec(ns);
10161
10162         return 0;
10163 }
10164
10165 static int
10166 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10167                                 struct timespec *timestamp)
10168 {
10169         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10170         struct i40e_adapter *adapter =
10171                 (struct i40e_adapter *)dev->data->dev_private;
10172
10173         uint32_t sync_status;
10174         uint64_t tx_tstamp_cycles;
10175         uint64_t ns;
10176
10177         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10178         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10179                 return -EINVAL;
10180
10181         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10182         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10183         *timestamp = rte_ns_to_timespec(ns);
10184
10185         return 0;
10186 }
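
/*
 * Illustrative usage sketch (not part of the driver): the timesync hooks
 * above back the ethdev IEEE 1588 API. A minimal TX timestamp read,
 * assuming port_id and that a PTP packet was just sent with the
 * PKT_TX_IEEE1588_TMST mbuf flag:
 *
 *      struct timespec ts;
 *
 *      rte_eth_timesync_enable(port_id);
 *      // ... transmit the PTP packet ...
 *      while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) < 0)
 *              rte_delay_us(10);
 */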
10187
10188 /*
10189  * i40e_parse_dcb_configure - parse the DCB configuration from the user
10190  * @dev: the device being configured
10191  * @dcb_cfg: pointer to the parsed DCB configuration
10192  * @tc_map: bit map of enabled traffic classes
10193  *
10194  * Returns 0 on success, negative value on failure
10195  */
10196 static int
10197 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10198                          struct i40e_dcbx_config *dcb_cfg,
10199                          uint8_t *tc_map)
10200 {
10201         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10202         uint8_t i, tc_bw, bw_lf;
10203
10204         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10205
10206         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10207         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10208                 PMD_INIT_LOG(ERR, "number of TCs exceeds the maximum.");
10209                 return -EINVAL;
10210         }
10211
10212         /* assume each tc has the same bw */
10213         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10214         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10215                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10216         /* to ensure the sum of tcbw is equal to 100 */
10217         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10218         for (i = 0; i < bw_lf; i++)
10219                 dcb_cfg->etscfg.tcbwtable[i]++;
10220
10221         /* assume each tc has the same Transmission Selection Algorithm */
10222         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10223                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10224
10225         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10226                 dcb_cfg->etscfg.prioritytable[i] =
10227                                 dcb_rx_conf->dcb_tc[i];
10228
10229         /* FW needs one App to configure HW */
10230         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10231         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10232         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10233         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10234
10235         if (dcb_rx_conf->nb_tcs == 0)
10236                 *tc_map = 1; /* tc0 only */
10237         else
10238                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10239
10240         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10241                 dcb_cfg->pfc.willing = 0;
10242                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10243                 dcb_cfg->pfc.pfcenable = *tc_map;
10244         }
10245         return 0;
10246 }
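
/*
 * Illustrative sketch (not part of the driver): the dcb_rx_conf parsed
 * above comes from the application's port configuration. A 4-TC setup
 * that spreads the 8 user priorities round-robin over the TCs might look
 * like this (the mapping is an assumption made for the example only):
 *
 *      struct rte_eth_conf dev_conf;
 *      struct rte_eth_dcb_rx_conf *dcb;
 *      int i;
 *
 *      memset(&dev_conf, 0, sizeof(dev_conf));
 *      dev_conf.rxmode.mq_mode = ETH_MQ_RX_DCB;
 *      dcb = &dev_conf.rx_adv_conf.dcb_rx_conf;
 *      dcb->nb_tcs = ETH_4_TCS;
 *      for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
 *              dcb->dcb_tc[i] = i % ETH_4_TCS;
 */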
10247
10248
10249 static enum i40e_status_code
10250 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10251                               struct i40e_aqc_vsi_properties_data *info,
10252                               uint8_t enabled_tcmap)
10253 {
10254         enum i40e_status_code ret;
10255         int i, total_tc = 0;
10256         uint16_t qpnum_per_tc, bsf, qp_idx;
10257         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10258         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10259         uint16_t used_queues;
10260
10261         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10262         if (ret != I40E_SUCCESS)
10263                 return ret;
10264
10265         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10266                 if (enabled_tcmap & (1 << i))
10267                         total_tc++;
10268         }
10269         if (total_tc == 0)
10270                 total_tc = 1;
10271         vsi->enabled_tc = enabled_tcmap;
10272
10273         /* different VSI has different queues assigned */
10274         if (vsi->type == I40E_VSI_MAIN)
10275                 used_queues = dev_data->nb_rx_queues -
10276                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10277         else if (vsi->type == I40E_VSI_VMDQ2)
10278                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10279         else {
10280                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10281                 return I40E_ERR_NO_AVAILABLE_VSI;
10282         }
10283
10284         /* Number of queues per enabled TC */
10285         qpnum_per_tc = used_queues / total_tc;
10286         if (qpnum_per_tc == 0) {
10287                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
10288                 return I40E_ERR_INVALID_QP_ID;
10289         }
10290         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10291                                 I40E_MAX_Q_PER_TC);
10292         bsf = rte_bsf32(qpnum_per_tc);
10293
10294         /**
10295          * Configure TC and queue mapping parameters, for enabled TC,
10296          * allocate qpnum_per_tc queues to this traffic. For disabled TC,
10297          * default queue will serve it.
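         * For example, with two enabled TCs and eight usable queues,
         * qpnum_per_tc = 4 and bsf = 2, so TC0 takes queues 0-3
         * (offset 0) and TC1 takes queues 4-7 (offset 4).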
10298          */
10299         qp_idx = 0;
10300         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10301                 if (vsi->enabled_tc & (1 << i)) {
10302                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10303                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10304                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10305                         qp_idx += qpnum_per_tc;
10306                 } else
10307                         info->tc_mapping[i] = 0;
10308         }
10309
10310         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
10311         if (vsi->type == I40E_VSI_SRIOV) {
10312                 info->mapping_flags |=
10313                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10314                 for (i = 0; i < vsi->nb_qps; i++)
10315                         info->queue_mapping[i] =
10316                                 rte_cpu_to_le_16(vsi->base_queue + i);
10317         } else {
10318                 info->mapping_flags |=
10319                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10320                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10321         }
10322         info->valid_sections |=
10323                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10324
10325         return I40E_SUCCESS;
10326 }
10327
10328 /*
10329  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10330  * @veb: VEB to be configured
10331  * @tc_map: enabled TC bitmap
10332  *
10333  * Returns 0 on success, negative value on failure
10334  */
10335 static enum i40e_status_code
10336 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10337 {
10338         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10339         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10340         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10341         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10342         enum i40e_status_code ret = I40E_SUCCESS;
10343         int i;
10344         uint32_t bw_max;
10345
10346         /* Check if enabled_tc is same as existing or new TCs */
10347         if (veb->enabled_tc == tc_map)
10348                 return ret;
10349
10350         /* configure tc bandwidth */
10351         memset(&veb_bw, 0, sizeof(veb_bw));
10352         veb_bw.tc_valid_bits = tc_map;
10353         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10354         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10355                 if (tc_map & BIT_ULL(i))
10356                         veb_bw.tc_bw_share_credits[i] = 1;
10357         }
10358         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10359                                                    &veb_bw, NULL);
10360         if (ret) {
10361                 PMD_INIT_LOG(ERR,
10362                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10363                         hw->aq.asq_last_status);
10364                 return ret;
10365         }
10366
10367         memset(&ets_query, 0, sizeof(ets_query));
10368         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10369                                                    &ets_query, NULL);
10370         if (ret != I40E_SUCCESS) {
10371                 PMD_DRV_LOG(ERR,
10372                         "Failed to get switch_comp ETS configuration %u",
10373                         hw->aq.asq_last_status);
10374                 return ret;
10375         }
10376         memset(&bw_query, 0, sizeof(bw_query));
10377         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10378                                                   &bw_query, NULL);
10379         if (ret != I40E_SUCCESS) {
10380                 PMD_DRV_LOG(ERR,
10381                         "Failed to get switch_comp bandwidth configuration %u",
10382                         hw->aq.asq_last_status);
10383                 return ret;
10384         }
10385
10386         /* store and print out BW info */
10387         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10388         veb->bw_info.bw_max = ets_query.tc_bw_max;
10389         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10390         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10391         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10392                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10393                      I40E_16_BIT_WIDTH);
10394         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10395                 veb->bw_info.bw_ets_share_credits[i] =
10396                                 bw_query.tc_bw_share_credits[i];
10397                 veb->bw_info.bw_ets_credits[i] =
10398                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10399                 /* 4 bits per TC, 4th bit is reserved */
10400                 veb->bw_info.bw_ets_max[i] =
10401                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10402                                   RTE_LEN2MASK(3, uint8_t));
10403                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10404                             veb->bw_info.bw_ets_share_credits[i]);
10405                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10406                             veb->bw_info.bw_ets_credits[i]);
10407                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10408                             veb->bw_info.bw_ets_max[i]);
10409         }
10410
10411         veb->enabled_tc = tc_map;
10412
10413         return ret;
10414 }
10415
10416
10417 /*
10418  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10419  * @vsi: VSI to be configured
10420  * @tc_map: enabled TC bitmap
10421  *
10422  * Returns 0 on success, negative value on failure
10423  */
10424 static enum i40e_status_code
10425 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10426 {
10427         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10428         struct i40e_vsi_context ctxt;
10429         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10430         enum i40e_status_code ret = I40E_SUCCESS;
10431         int i;
10432
10433         /* Nothing to do if the requested TC map is already enabled */
10434         if (vsi->enabled_tc == tc_map)
10435                 return ret;
10436
10437         /* configure tc bandwidth */
10438         memset(&bw_data, 0, sizeof(bw_data));
10439         bw_data.tc_valid_bits = tc_map;
10440         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10441         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10442                 if (tc_map & BIT_ULL(i))
10443                         bw_data.tc_bw_credits[i] = 1;
10444         }
10445         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10446         if (ret) {
10447                 PMD_INIT_LOG(ERR,
10448                         "AQ command Config VSI BW allocation per TC failed = %d",
10449                         hw->aq.asq_last_status);
10450                 goto out;
10451         }
10452         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10453                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10454
10455         /* Update Queue Pairs Mapping for currently enabled UPs */
10456         ctxt.seid = vsi->seid;
10457         ctxt.pf_num = hw->pf_id;
10458         ctxt.vf_num = 0;
10459         ctxt.uplink_seid = vsi->uplink_seid;
10460         ctxt.info = vsi->info;
10461         i40e_get_cap(hw);
10462         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10463         if (ret)
10464                 goto out;
10465
10466         /* Update the VSI after updating the VSI queue-mapping information */
10467         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10468         if (ret) {
10469                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10470                         hw->aq.asq_last_status);
10471                 goto out;
10472         }
10473         /* update the local VSI info with updated queue map */
10474         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10475                                         sizeof(vsi->info.tc_mapping));
10476         rte_memcpy(&vsi->info.queue_mapping,
10477                         &ctxt.info.queue_mapping,
10478                 sizeof(vsi->info.queue_mapping));
10479         vsi->info.mapping_flags = ctxt.info.mapping_flags;
10480         vsi->info.valid_sections = 0;
10481
10482         /* query and update current VSI BW information */
10483         ret = i40e_vsi_get_bw_config(vsi);
10484         if (ret) {
10485                 PMD_INIT_LOG(ERR,
10486                          "Failed updating vsi bw info, err %s aq_err %s",
10487                          i40e_stat_str(hw, ret),
10488                          i40e_aq_str(hw, hw->aq.asq_last_status));
10489                 goto out;
10490         }
10491
10492         vsi->enabled_tc = tc_map;
10493
10494 out:
10495         return ret;
10496 }
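/*
 * Illustrative note: tc_map is a traffic-class bitmap, e.g. 0x01 enables
 * only TC0 (I40E_DEFAULT_TCMAP) while 0x0f enables TC0-TC3, each with an
 * equal ETS share as programmed above.
 */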
10497
10498 /*
10499  * i40e_dcb_hw_configure - program the dcb setting to hw
10500  * @pf: pf the configuration is taken on
10501  * @new_cfg: new configuration
10502  * @tc_map: enabled TC bitmap
10503  *
10504  * Returns 0 on success, negative value on failure
10505  */
10506 static enum i40e_status_code
10507 i40e_dcb_hw_configure(struct i40e_pf *pf,
10508                       struct i40e_dcbx_config *new_cfg,
10509                       uint8_t tc_map)
10510 {
10511         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10512         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
10513         struct i40e_vsi *main_vsi = pf->main_vsi;
10514         struct i40e_vsi_list *vsi_list;
10515         enum i40e_status_code ret;
10516         int i;
10517         uint32_t val;
10518
10519         /* Use the FW LLDP API only if FW >= v4.4 */
10520         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
10521               (hw->aq.fw_maj_ver >= 5))) {
10522                 PMD_INIT_LOG(ERR,
10523                         "FW < v4.4, can not use FW LLDP API to configure DCB");
10524                 return I40E_ERR_FIRMWARE_API_VERSION;
10525         }
10526
10527         /* Check whether reconfiguration is needed */
10528         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
10529                 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
10530                 return I40E_SUCCESS;
10531         }
10532
10533         /* Copy the new config to the current config */
10534         *old_cfg = *new_cfg;
10535         old_cfg->etsrec = old_cfg->etscfg;
10536         ret = i40e_set_dcb_config(hw);
10537         if (ret) {
10538                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
10539                          i40e_stat_str(hw, ret),
10540                          i40e_aq_str(hw, hw->aq.asq_last_status));
10541                 return ret;
10542         }
10543         /* set the receive arbiter to RR mode and the ETS scheme by default */
10544         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
10545                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
10546                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
10547                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
10548                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
10549                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
10550                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
10551                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
10552                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
10553                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
10554                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
10555                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
10556                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
10557         }
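        /*
         * Illustrative note: the ETS bandwidth table holds percentages
         * summing to 100, so tcbwtable[i] == 50 in the write above
         * programs a 50% share for TC i and marks it as ETS-scheduled.
         */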
10558         /* get local mib to check whether it is configured correctly */
10559         /* IEEE mode */
10560         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
10561         /* Get Local DCB Config */
10562         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
10563                                      &hw->local_dcbx_config);
10564
10565         /* If a VEB exists, update its TC configuration first */
10566         if (main_vsi->veb) {
10567                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
10568                 if (ret)
10569                         PMD_INIT_LOG(WARNING,
10570                                  "Failed configuring TC for VEB seid=%d",
10571                                  main_vsi->veb->seid);
10572         }
10573         /* Update each VSI */
10574         i40e_vsi_config_tc(main_vsi, tc_map);
10575         if (main_vsi->veb) {
10576                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
10577                         /* Besides the main VSI and VMDQ VSIs, only enable
10578                          * the default TC for other VSIs
10579                          */
10580                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
10581                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10582                                                          tc_map);
10583                         else
10584                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10585                                                          I40E_DEFAULT_TCMAP);
10586                         if (ret)
10587                                 PMD_INIT_LOG(WARNING,
10588                                         "Failed configuring TC for VSI seid=%d",
10589                                         vsi_list->vsi->seid);
10590                         /* continue */
10591                 }
10592         }
10593         return I40E_SUCCESS;
10594 }
10595
10596 /*
10597  * i40e_dcb_init_configure - initial dcb config
10598  * @dev: device being configured
10599  * @sw_dcb: indicates whether DCB is software-configured or offloaded to HW
10600  *
10601  * Returns 0 on success, negative value on failure
10602  */
10603 int
10604 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
10605 {
10606         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10607         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10608         int i, ret = 0;
10609
10610         if ((pf->flags & I40E_FLAG_DCB) == 0) {
10611                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10612                 return -ENOTSUP;
10613         }
10614
10615         /* DCB initialization:
10616          * Update DCB configuration from the Firmware and configure
10617          * LLDP MIB change event.
10618          */
10619         if (sw_dcb == TRUE) {
10620                 ret = i40e_init_dcb(hw);
10621                 /* If the LLDP agent is stopped, i40e_init_dcb is expected
10622                  * to fail with adminq status I40E_AQ_RC_EPERM.
10623                  * Otherwise, it should succeed.
10624                  */
10625                 if (ret == I40E_SUCCESS ||
10626                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
10627                         memset(&hw->local_dcbx_config, 0,
10628                                 sizeof(struct i40e_dcbx_config));
10629                         /* set dcb default configuration */
10630                         hw->local_dcbx_config.etscfg.willing = 0;
10631                         hw->local_dcbx_config.etscfg.maxtcs = 0;
10632                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
10633                         hw->local_dcbx_config.etscfg.tsatable[0] =
10634                                                 I40E_IEEE_TSA_ETS;
10635                         /* all UPs mapping to TC0 */
10636                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10637                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
10638                         hw->local_dcbx_config.etsrec =
10639                                 hw->local_dcbx_config.etscfg;
10640                         hw->local_dcbx_config.pfc.willing = 0;
10641                         hw->local_dcbx_config.pfc.pfccap =
10642                                                 I40E_MAX_TRAFFIC_CLASS;
10643                         /* FW needs one App to configure HW */
10644                         hw->local_dcbx_config.numapps = 1;
10645                         hw->local_dcbx_config.app[0].selector =
10646                                                 I40E_APP_SEL_ETHTYPE;
10647                         hw->local_dcbx_config.app[0].priority = 3;
10648                         hw->local_dcbx_config.app[0].protocolid =
10649                                                 I40E_APP_PROTOID_FCOE;
10650                         ret = i40e_set_dcb_config(hw);
10651                         if (ret) {
10652                                 PMD_INIT_LOG(ERR,
10653                                         "Default DCB config failed, err = %d, aq_err = %d.",
10654                                         ret, hw->aq.asq_last_status);
10655                                 return -ENOSYS;
10656                         }
10657                 } else {
10658                         PMD_INIT_LOG(ERR,
10659                                 "DCB initialization in FW failed, err = %d, aq_err = %d.",
10660                                 ret, hw->aq.asq_last_status);
10661                         return -ENOTSUP;
10662                 }
10663         } else {
10664                 ret = i40e_aq_start_lldp(hw, NULL);
10665                 if (ret != I40E_SUCCESS)
10666                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
10667
10668                 ret = i40e_init_dcb(hw);
10669                 if (!ret) {
10670                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
10671                                 PMD_INIT_LOG(ERR,
10672                                         "HW doesn't support DCBX offload.");
10673                                 return -ENOTSUP;
10674                         }
10675                 } else {
10676                         PMD_INIT_LOG(ERR,
10677                                 "DCBX configuration failed, err = %d, aq_err = %d.",
10678                                 ret, hw->aq.asq_last_status);
10679                         return -ENOTSUP;
10680                 }
10681         }
10682         return 0;
10683 }
10684
10685 /*
10686  * i40e_dcb_setup - setup dcb related config
10687  * @dev: device being configured
10688  *
10689  * Returns 0 on success, negative value on failure
10690  */
10691 static int
10692 i40e_dcb_setup(struct rte_eth_dev *dev)
10693 {
10694         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10695         struct i40e_dcbx_config dcb_cfg;
10696         uint8_t tc_map = 0;
10697         int ret = 0;
10698
10699         if ((pf->flags & I40E_FLAG_DCB) == 0) {
10700                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10701                 return -ENOTSUP;
10702         }
10703
10704         if (pf->vf_num != 0)
10705                 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
10706
10707         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
10708         if (ret) {
10709                 PMD_INIT_LOG(ERR, "invalid dcb config");
10710                 return -EINVAL;
10711         }
10712         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
10713         if (ret) {
10714                 PMD_INIT_LOG(ERR, "DCB software configuration failed");
10715                 return -ENOSYS;
10716         }
10717
10718         return 0;
10719 }
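/*
 * Usage sketch (illustrative, not part of the driver): an application
 * typically reaches i40e_dcb_setup() by requesting a DCB multi-queue
 * mode in the port configuration, e.g.:
 *
 *     struct rte_eth_conf conf;
 *
 *     memset(&conf, 0, sizeof(conf));
 *     conf.rxmode.mq_mode = ETH_MQ_RX_DCB;
 *     conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */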
10720
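/*
 * i40e_dev_get_dcb_info - query the DCB TC and queue mapping
 * @dev: device being queried
 * @dcb_info: output structure for the DCB information
 *
 * Returns 0 on success, negative value on failure
 */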
10721 static int
10722 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
10723                       struct rte_eth_dcb_info *dcb_info)
10724 {
10725         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10726         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10727         struct i40e_vsi *vsi = pf->main_vsi;
10728         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
10729         uint16_t bsf, tc_mapping;
10730         int i, j = 0;
10731
10732         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
10733                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
10734         else
10735                 dcb_info->nb_tcs = 1;
10736         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10737                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
10738         for (i = 0; i < dcb_info->nb_tcs; i++)
10739                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
10740
10741         /* get queue mapping if vmdq is disabled */
10742         if (!pf->nb_cfg_vmdq_vsi) {
10743                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10744                         if (!(vsi->enabled_tc & (1 << i)))
10745                                 continue;
10746                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
10747                         dcb_info->tc_queue.tc_rxq[j][i].base =
10748                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
10749                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
10750                         dcb_info->tc_queue.tc_txq[j][i].base =
10751                                 dcb_info->tc_queue.tc_rxq[j][i].base;
10752                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
10753                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
10754                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
10755                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
10756                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
10757                 }
10758                 return 0;
10759         }
10760
10761         /* get queue mapping if vmdq is enabled */
10762         do {
10763                 vsi = pf->vmdq[j].vsi;
10764                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10765                         if (!(vsi->enabled_tc & (1 << i)))
10766                                 continue;
10767                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
10768                         dcb_info->tc_queue.tc_rxq[j][i].base =
10769                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
10770                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
10771                         dcb_info->tc_queue.tc_txq[j][i].base =
10772                                 dcb_info->tc_queue.tc_rxq[j][i].base;
10773                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
10774                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
10775                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
10776                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
10777                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
10778                 }
10779                 j++;
10780         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
10781         return 0;
10782 }
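/*
 * Usage sketch (illustrative): the mapping filled in above is exposed
 * through the generic ethdev API, e.g.:
 *
 *     struct rte_eth_dcb_info info;
 *
 *     if (rte_eth_dev_get_dcb_info(port_id, &info) == 0)
 *             printf("TC0: rx base %u, %u queues\n",
 *                    info.tc_queue.tc_rxq[0][0].base,
 *                    info.tc_queue.tc_rxq[0][0].nb_queue);
 */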
10783
10784 static int
10785 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
10786 {
10787         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10788         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
10789         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10790         uint16_t interval =
10791                 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1);
10792         uint16_t msix_intr;
10793
10794         msix_intr = intr_handle->intr_vec[queue_id];
10795         if (msix_intr == I40E_MISC_VEC_ID)
10796                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
10797                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
10798                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
10799                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
10800                                (interval <<
10801                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
10802         else
10803                 I40E_WRITE_REG(hw,
10804                                I40E_PFINT_DYN_CTLN(msix_intr -
10805                                                    I40E_RX_VEC_START),
10806                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
10807                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
10808                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
10809                                (interval <<
10810                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
10811
10812         I40E_WRITE_FLUSH(hw);
10813         rte_intr_enable(&pci_dev->intr_handle);
10814
10815         return 0;
10816 }
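/*
 * Usage sketch (illustrative): with rxq interrupts requested via
 * rte_eth_conf.intr_conf.rxq = 1 at configure time, an application
 * re-arms a queue's vector after servicing it with
 *
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *
 * which dispatches to the callback above.
 */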
10817
10818 static int
10819 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
10820 {
10821         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10822         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
10823         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10824         uint16_t msix_intr;
10825
10826         msix_intr = intr_handle->intr_vec[queue_id];
10827         if (msix_intr == I40E_MISC_VEC_ID)
10828                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
10829         else
10830                 I40E_WRITE_REG(hw,
10831                                I40E_PFINT_DYN_CTLN(msix_intr -
10832                                                    I40E_RX_VEC_START),
10833                                0);
10834         I40E_WRITE_FLUSH(hw);
10835
10836         return 0;
10837 }
10838
10839 static int i40e_get_regs(struct rte_eth_dev *dev,
10840                          struct rte_dev_reg_info *regs)
10841 {
10842         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10843         uint32_t *ptr_data = regs->data;
10844         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
10845         const struct i40e_reg_info *reg_info;
10846
10847         if (ptr_data == NULL) {
10848                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
10849                 regs->width = sizeof(uint32_t);
10850                 return 0;
10851         }
10852
10853         /* The first few registers have to be read using AQ operations */
10854         reg_idx = 0;
10855         while (i40e_regs_adminq[reg_idx].name) {
10856                 reg_info = &i40e_regs_adminq[reg_idx++];
10857                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
10858                         for (arr_idx2 = 0;
10859                                         arr_idx2 <= reg_info->count2;
10860                                         arr_idx2++) {
10861                                 reg_offset = arr_idx * reg_info->stride1 +
10862                                         arr_idx2 * reg_info->stride2;
10863                                 reg_offset += reg_info->base_addr;
10864                                 ptr_data[reg_offset >> 2] =
10865                                         i40e_read_rx_ctl(hw, reg_offset);
10866                         }
10867         }
10868
10869         /* The remaining registers can be read using primitives */
10870         reg_idx = 0;
10871         while (i40e_regs_others[reg_idx].name) {
10872                 reg_info = &i40e_regs_others[reg_idx++];
10873                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
10874                         for (arr_idx2 = 0;
10875                                         arr_idx2 <= reg_info->count2;
10876                                         arr_idx2++) {
10877                                 reg_offset = arr_idx * reg_info->stride1 +
10878                                         arr_idx2 * reg_info->stride2;
10879                                 reg_offset += reg_info->base_addr;
10880                                 ptr_data[reg_offset >> 2] =
10881                                         I40E_READ_REG(hw, reg_offset);
10882                         }
10883         }
10884
10885         return 0;
10886 }
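/*
 * Usage sketch (illustrative): callers are expected to probe the
 * required size first by passing a NULL data pointer, e.g.:
 *
 *     struct rte_dev_reg_info regs;
 *
 *     memset(&regs, 0, sizeof(regs));
 *     rte_eth_dev_get_reg_info(port_id, &regs);    - fills regs.length
 *     regs.data = calloc(regs.length, regs.width);
 *     rte_eth_dev_get_reg_info(port_id, &regs);    - fills regs.data
 */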
10887
10888 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
10889 {
10890         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10891
10892         /* Convert word count to byte count */
10893         return hw->nvm.sr_size << 1;
10894 }
10895
10896 static int i40e_get_eeprom(struct rte_eth_dev *dev,
10897                            struct rte_dev_eeprom_info *eeprom)
10898 {
10899         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10900         uint16_t *data = eeprom->data;
10901         uint16_t offset, length, cnt_words;
10902         int ret_code;
10903
10904         offset = eeprom->offset >> 1;
10905         length = eeprom->length >> 1;
10906         cnt_words = length;
10907
10908         if (offset > hw->nvm.sr_size ||
10909                 offset + length > hw->nvm.sr_size) {
10910                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
10911                 return -EINVAL;
10912         }
10913
10914         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
10915
10916         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
10917         if (ret_code != I40E_SUCCESS || cnt_words != length) {
10918                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
10919                 return -EIO;
10920         }
10921
10922         return 0;
10923 }
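/*
 * Usage sketch (illustrative): offset and length are in bytes at the
 * ethdev level and converted to 16-bit NVM words above, e.g.:
 *
 *     uint16_t buf[64];
 *     struct rte_dev_eeprom_info info;
 *
 *     memset(&info, 0, sizeof(info));
 *     info.data = buf;
 *     info.offset = 0;
 *     info.length = sizeof(buf);
 *     rte_eth_dev_get_eeprom(port_id, &info);
 */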
10924
10925 static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
10926                                       struct ether_addr *mac_addr)
10927 {
10928         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10929         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10930         struct i40e_vsi *vsi = pf->main_vsi;
10931         struct i40e_mac_filter_info mac_filter;
10932         struct i40e_mac_filter *f;
10933         int ret;
10934
10935         if (!is_valid_assigned_ether_addr(mac_addr)) {
10936                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
10937                 return;
10938         }
10939
10940         TAILQ_FOREACH(f, &vsi->mac_list, next) {
10941                 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
10942                         break;
10943         }
10944
10945         if (f == NULL) {
10946                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
10947                 return;
10948         }
10949
10950         mac_filter = f->mac_info;
10951         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
10952         if (ret != I40E_SUCCESS) {
10953                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
10954                 return;
10955         }
10956         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
10957         ret = i40e_vsi_add_mac(vsi, &mac_filter);
10958         if (ret != I40E_SUCCESS) {
10959                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
10960                 return;
10961         }
10962         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
10963
10964         /* Flags: 0x3 updates port address */
10965         i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
10966 }
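/*
 * Usage sketch (illustrative): the callback above is reached through
 * the generic ethdev API, e.g.:
 *
 *     struct ether_addr addr = { .addr_bytes = {0x02, 0, 0, 0, 0, 1} };
 *
 *     rte_eth_dev_default_mac_addr_set(port_id, &addr);
 */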
10967
10968 static int
10969 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
10970 {
10971         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10972         struct rte_eth_dev_data *dev_data = pf->dev_data;
10973         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
10974         int ret = 0;
10975
10976         /* check if mtu is within the allowed range */
10977         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
10978                 return -EINVAL;
10979
10980         /* MTU setting is forbidden while the port is started */
10981         if (dev_data->dev_started) {
10982                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
10983                             dev_data->port_id);
10984                 return -EBUSY;
10985         }
10986
10987         if (frame_size > ETHER_MAX_LEN)
10988                 dev_data->dev_conf.rxmode.jumbo_frame = 1;
10989         else
10990                 dev_data->dev_conf.rxmode.jumbo_frame = 0;
10991
10992         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
10993
10994         return ret;
10995 }
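/*
 * Worked example (illustrative): rte_eth_dev_set_mtu(port_id, 9000)
 * lands here with mtu == 9000; frame_size becomes 9000 plus the L2
 * overhead (I40E_ETH_OVERHEAD), which exceeds ETHER_MAX_LEN (1518),
 * so jumbo_frame is enabled and max_rx_pkt_len is raised accordingly.
 */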
10996
10997 /* Restore ethertype filter */
10998 static void
10999 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11000 {
11001         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11002         struct i40e_ethertype_filter_list
11003                 *ethertype_list = &pf->ethertype.ethertype_list;
11004         struct i40e_ethertype_filter *f;
11005         struct i40e_control_filter_stats stats = {0}; /* read after loop */
11006         uint16_t flags;
11007
11008         TAILQ_FOREACH(f, ethertype_list, rules) {
11009                 flags = 0;
11010                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11011                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11012                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11013                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11014                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11015
11016                 memset(&stats, 0, sizeof(stats));
11017                 i40e_aq_add_rem_control_packet_filter(hw,
11018                                             f->input.mac_addr.addr_bytes,
11019                                             f->input.ether_type,
11020                                             flags, pf->main_vsi->seid,
11021                                             f->queue, 1, &stats, NULL);
11022         }
11023         PMD_DRV_LOG(INFO, "Ethertype filter:"
11024                     " mac_etype_used = %u, etype_used = %u,"
11025                     " mac_etype_free = %u, etype_free = %u",
11026                     stats.mac_etype_used, stats.etype_used,
11027                     stats.mac_etype_free, stats.etype_free);
11028 }
11029
11030 /* Restore tunnel filter */
11031 static void
11032 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11033 {
11034         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11035         struct i40e_vsi *vsi;
11036         struct i40e_pf_vf *vf;
11037         struct i40e_tunnel_filter_list
11038                 *tunnel_list = &pf->tunnel.tunnel_list;
11039         struct i40e_tunnel_filter *f;
11040         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
11041         bool big_buffer;
11042
11043         TAILQ_FOREACH(f, tunnel_list, rules) {
11044                 if (!f->is_to_vf)
11045                         vsi = pf->main_vsi;
11046                 else {
11047                         vf = &pf->vfs[f->vf_id];
11048                         vsi = vf->vsi;
11049                 }
11050                 memset(&cld_filter, 0, sizeof(cld_filter));
11051                 ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
11052                         (struct ether_addr *)&cld_filter.element.outer_mac);
11053                 ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
11054                         (struct ether_addr *)&cld_filter.element.inner_mac);
11055                 cld_filter.element.inner_vlan = f->input.inner_vlan;
11056                 cld_filter.element.flags = f->input.flags;
11057                 cld_filter.element.tenant_id = f->input.tenant_id;
11058                 cld_filter.element.queue_number = f->queue;
11059                 rte_memcpy(cld_filter.general_fields,
11060                            f->input.general_fields,
11061                            sizeof(f->input.general_fields));
11062
11063                 /* Recompute per filter so a regular filter after a
11064                  * big-buffer one is not sent via the big-buffer path.
11065                  */
11066                 big_buffer =
11067                     ((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11068                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11069                     ((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11070                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11071                     ((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11072                      I40E_AQC_ADD_CLOUD_FILTER_0X10);
11073
11074                 if (big_buffer)
11075                         i40e_aq_add_cloud_filters_big_buffer(hw,
11076                                              vsi->seid, &cld_filter, 1);
11077                 else
11078                         i40e_aq_add_cloud_filters(hw, vsi->seid,
11079                                                   &cld_filter.element, 1);
11080         }
11081 }
11082
11083 static void
11084 i40e_filter_restore(struct i40e_pf *pf)
11085 {
11086         i40e_ethertype_filter_restore(pf);
11087         i40e_tunnel_filter_restore(pf);
11088         i40e_fdir_filter_restore(pf);
11089 }
11090
11091 static bool
11092 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11093 {
11094         if (strcmp(dev->device->driver->name, drv->driver.name))
11095                 return false;
11096
11097         return true;
11098 }
11099
11100 bool
11101 is_i40e_supported(struct rte_eth_dev *dev)
11102 {
11103         return is_device_supported(dev, &rte_i40e_pmd);
11104 }
11105
11106 struct i40e_customized_pctype *
11107 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11108 {
11109         int i;
11110
11111         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11112                 if (pf->customized_pctype[i].index == index)
11113                         return &pf->customized_pctype[i];
11114         }
11115         return NULL;
11116 }
11117
11118 static int
11119 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11120                               uint32_t pkg_size, uint32_t proto_num,
11121                               struct rte_pmd_i40e_proto_info *proto)
11122 {
11123         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11124         uint32_t pctype_num;
11125         struct rte_pmd_i40e_ptype_info *pctype;
11126         uint32_t buff_size;
11127         struct i40e_customized_pctype *new_pctype = NULL;
11128         uint8_t proto_id;
11129         uint8_t pctype_value;
11130         char name[64];
11131         uint32_t i, j, n;
11132         int ret;
11133
11134         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11135                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
11136                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11137         if (ret) {
11138                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
11139                 return -1;
11140         }
11141         if (!pctype_num) {
11142                 PMD_DRV_LOG(INFO, "No new pctype added");
11143                 return -1;
11144         }
11145
11146         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
11147         pctype = rte_zmalloc("new_pctype", buff_size, 0);
11148         if (!pctype) {
11149                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11150                 return -1;
11151         }
11152         /* get information about new pctype list */
11153         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11154                                         (uint8_t *)pctype, buff_size,
11155                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11156         if (ret) {
11157                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
11158                 rte_free(pctype);
11159                 return -1;
11160         }
11161
11162         /* Update customized pctype. */
11163         for (i = 0; i < pctype_num; i++) {
11164                 pctype_value = pctype[i].ptype_id;
11165                 memset(name, 0, sizeof(name));
11166                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11167                         proto_id = pctype[i].protocols[j];
11168                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11169                                 continue;
11170                         for (n = 0; n < proto_num; n++) {
11171                                 if (proto[n].proto_id != proto_id)
11172                                         continue;
11173                                 strcat(name, proto[n].name);
11174                                 strcat(name, "_");
11175                                 break;
11176                         }
11177                 }
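                /*
                 * Illustrative example: a pctype whose protocol list is
                 * {GTPU, IPV4} builds "GTPU_IPV4_" here; the trailing
                 * '_' is stripped just below when the name is non-empty.
                 */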
11178                 if (name[0] != '\0')
                        name[strlen(name) - 1] = '\0';
11179                 if (!strcmp(name, "GTPC"))
11180                         new_pctype =
11181                                 i40e_find_customized_pctype(pf,
11182                                                       I40E_CUSTOMIZED_GTPC);
11183                 else if (!strcmp(name, "GTPU_IPV4"))
11184                         new_pctype =
11185                                 i40e_find_customized_pctype(pf,
11186                                                    I40E_CUSTOMIZED_GTPU_IPV4);
11187                 else if (!strcmp(name, "GTPU_IPV6"))
11188                         new_pctype =
11189                                 i40e_find_customized_pctype(pf,
11190                                                    I40E_CUSTOMIZED_GTPU_IPV6);
11191                 else if (!strcmp(name, "GTPU"))
11192                         new_pctype =
11193                                 i40e_find_customized_pctype(pf,
11194                                                       I40E_CUSTOMIZED_GTPU);
11195                 if (new_pctype) {
11196                         new_pctype->pctype = pctype_value;
11197                         new_pctype->valid = true;
11198                 }
11199         }
11200
11201         rte_free(pctype);
11202         return 0;
11203 }
11204
11205 static int
11206 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
11207                                uint32_t pkg_size, uint32_t proto_num,
11208                                struct rte_pmd_i40e_proto_info *proto)
11209 {
11210         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
11211         uint16_t port_id = dev->data->port_id;
11212         uint32_t ptype_num;
11213         struct rte_pmd_i40e_ptype_info *ptype;
11214         uint32_t buff_size;
11215         uint8_t proto_id;
11216         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
11217         uint32_t i, j, n;
11218         bool in_tunnel;
11219         int ret;
11220
11221         /* get information about new ptype num */
11222         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11223                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
11224                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
11225         if (ret) {
11226                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
11227                 return ret;
11228         }
11229         if (!ptype_num) {
11230                 PMD_DRV_LOG(INFO, "No new ptype added");
11231                 return -1;
11232         }
11233
11234         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11235         ptype = rte_zmalloc("new_ptype", buff_size, 0);
11236         if (!ptype) {
11237                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11238                 return -1;
11239         }
11240
11241         /* get information about new ptype list */
11242         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11243                                         (uint8_t *)ptype, buff_size,
11244                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
11245         if (ret) {
11246                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
11247                 rte_free(ptype);
11248                 return ret;
11249         }
11250
11251         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
11252         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
11253         if (!ptype_mapping) {
11254                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11255                 rte_free(ptype);
11256                 return -1;
11257         }
11258
11259         /* Update ptype mapping table. */
11260         for (i = 0; i < ptype_num; i++) {
11261                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
11262                 ptype_mapping[i].sw_ptype = 0;
11263                 in_tunnel = false;
11264                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11265                         proto_id = ptype[i].protocols[j];
11266                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11267                                 continue;
11268                         for (n = 0; n < proto_num; n++) {
11269                                 if (proto[n].proto_id != proto_id)
11270                                         continue;
11271                                 memset(name, 0, sizeof(name));
11272                                 strcpy(name, proto[n].name);
11273                                 if (!strncmp(name, "PPPOE", 5))
11274                                         ptype_mapping[i].sw_ptype |=
11275                                                 RTE_PTYPE_L2_ETHER_PPPOE;
11276                                 else if (!strncmp(name, "OIPV4", 5)) {
11277                                         ptype_mapping[i].sw_ptype |=
11278                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11279                                         in_tunnel = true;
11280                                 } else if (!strncmp(name, "IPV4", 4) &&
11281                                            !in_tunnel)
11282                                         ptype_mapping[i].sw_ptype |=
11283                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11284                                 else if (!strncmp(name, "IPV4FRAG", 8) &&
11285                                          in_tunnel) {
11286                                         ptype_mapping[i].sw_ptype |=
11287                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11288                                         ptype_mapping[i].sw_ptype |=
11289                                                 RTE_PTYPE_INNER_L4_FRAG;
11290                                 } else if (!strncmp(name, "IPV4", 4) &&
11291                                            in_tunnel)
11292                                         ptype_mapping[i].sw_ptype |=
11293                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11294                                 else if (!strncmp(name, "OIPV6", 5)) {
11295                                         ptype_mapping[i].sw_ptype |=
11296                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11297                                         in_tunnel = true;
11298                                 } else if (!strncmp(name, "IPV6", 4) &&
11299                                            !in_tunnel)
11300                                         ptype_mapping[i].sw_ptype |=
11301                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11302                                 else if (!strncmp(name, "IPV6FRAG", 8) &&
11303                                          in_tunnel) {
11304                                         ptype_mapping[i].sw_ptype |=
11305                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11306                                         ptype_mapping[i].sw_ptype |=
11307                                                 RTE_PTYPE_INNER_L4_FRAG;
11308                                 } else if (!strncmp(name, "IPV6", 4) &&
11309                                            in_tunnel)
11310                                         ptype_mapping[i].sw_ptype |=
11311                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11312                                 else if (!strncmp(name, "UDP", 3) && !in_tunnel)
11313                                         ptype_mapping[i].sw_ptype |=
11314                                                 RTE_PTYPE_L4_UDP;
11315                                 else if (!strncmp(name, "UDP", 3) && in_tunnel)
11316                                         ptype_mapping[i].sw_ptype |=
11317                                                 RTE_PTYPE_INNER_L4_UDP;
11318                                 else if (!strncmp(name, "TCP", 3) && !in_tunnel)
11319                                         ptype_mapping[i].sw_ptype |=
11320                                                 RTE_PTYPE_L4_TCP;
11321                                 else if (!strncmp(name, "TCP", 3) && in_tunnel)
11322                                         ptype_mapping[i].sw_ptype |=
11323                                                 RTE_PTYPE_INNER_L4_TCP;
11324                                 else if (!strncmp(name, "SCTP", 4) &&
11325                                          !in_tunnel)
11326                                         ptype_mapping[i].sw_ptype |=
11327                                                 RTE_PTYPE_L4_SCTP;
11328                                 else if (!strncmp(name, "SCTP", 4) && in_tunnel)
11329                                         ptype_mapping[i].sw_ptype |=
11330                                                 RTE_PTYPE_INNER_L4_SCTP;
11331                                 else if ((!strncmp(name, "ICMP", 4) ||
11332                                           !strncmp(name, "ICMPV6", 6)) &&
11333                                          !in_tunnel)
11334                                         ptype_mapping[i].sw_ptype |=
11335                                                 RTE_PTYPE_L4_ICMP;
11336                                 else if ((!strncmp(name, "ICMP", 4) ||
11337                                           !strncmp(name, "ICMPV6", 6)) &&
11338                                          in_tunnel)
11339                                         ptype_mapping[i].sw_ptype |=
11340                                                 RTE_PTYPE_INNER_L4_ICMP;
11341                                 else if (!strncmp(name, "GTPC", 4)) {
11342                                         ptype_mapping[i].sw_ptype |=
11343                                                 RTE_PTYPE_TUNNEL_GTPC;
11344                                         in_tunnel = true;
11345                                 } else if (!strncmp(name, "GTPU", 4)) {
11346                                         ptype_mapping[i].sw_ptype |=
11347                                                 RTE_PTYPE_TUNNEL_GTPU;
11348                                         in_tunnel = true;
11349                                 } else if (!strncmp(name, "GRENAT", 6)) {
11350                                         ptype_mapping[i].sw_ptype |=
11351                                                 RTE_PTYPE_TUNNEL_GRENAT;
11352                                         in_tunnel = true;
11353                                 } else if (!strncmp(name, "L2TPv2CTL", 9)) {
11354                                         ptype_mapping[i].sw_ptype |=
11355                                                 RTE_PTYPE_TUNNEL_L2TP;
11356                                         in_tunnel = true;
11357                                 }
11358
11359                                 break;
11360                         }
11361                 }
11362         }
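        /*
         * Illustrative example: a ptype whose protocol list reads
         * {OIPV4, GTPU, IPV4, UDP} accumulates
         * RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GTPU |
         * RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP.
         */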
11363
11364         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
11365                                                 ptype_num, 0);
11366         if (ret)
11367                 PMD_DRV_LOG(ERR, "Failed to update mapping table.");
11368
11369         rte_free(ptype_mapping);
11370         rte_free(ptype);
11371         return ret;
11372 }
11373
11374 void
11375 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
11376                               uint32_t pkg_size)
11377 {
11378         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11379         uint32_t proto_num;
11380         struct rte_pmd_i40e_proto_info *proto;
11381         uint32_t buff_size;
11382         uint32_t i;
11383         int ret;
11384
11385         /* get information about protocol number */
11386         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11387                                        (uint8_t *)&proto_num, sizeof(proto_num),
11388                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
11389         if (ret) {
11390                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
11391                 return;
11392         }
11393         if (!proto_num) {
11394                 PMD_DRV_LOG(INFO, "No new protocol added");
11395                 return;
11396         }
11397
11398         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
11399         proto = rte_zmalloc("new_proto", buff_size, 0);
11400         if (!proto) {
11401                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11402                 return;
11403         }
11404
11405         /* get information about protocol list */
11406         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11407                                         (uint8_t *)proto, buff_size,
11408                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
11409         if (ret) {
11410                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
11411                 rte_free(proto);
11412                 return;
11413         }
11414
11415         /* Check if GTP is supported. */
11416         for (i = 0; i < proto_num; i++) {
11417                 if (!strncmp(proto[i].name, "GTP", 3)) {
11418                         pf->gtp_support = true;
11419                         break;
11420                 }
11421         }
11422
11423         /* Update customized pctype info */
11424         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
11425                                             proto_num, proto);
11426         if (ret)
11427                 PMD_DRV_LOG(INFO, "No pctype is updated.");
11428
11429         /* Update customized ptype info */
11430         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
11431                                            proto_num, proto);
11432         if (ret)
11433                 PMD_DRV_LOG(INFO, "No ptype is updated.");
11434
11435         rte_free(proto);
11436 }
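/*
 * Note (illustrative): this updater runs when a DDP profile is loaded
 * into the device, e.g. through rte_pmd_i40e_process_ddp_package(), so
 * newly defined protocols such as GTP become visible to the flow and
 * ptype layers.
 */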
11437
11438 /* Create a QinQ cloud filter
11439  *
11440  * The Fortville NIC has limited resources for tunnel filters,
11441  * so we can only reuse existing filters.
11442  *
11443  * In step 1 we define which Field Vector fields can be used for
11444  * filter types.
11445  * As the inner tag is not defined as a field, we have to define
11446  * it first, by reusing one of the L1 entries.
11447  *
11448  * In step 2 we replace one of the existing filter types with a
11449  * new one for QinQ. As we are reusing an L1 entry and replacing
11450  * an L2 entry, some of the default filter types will disappear,
11451  * depending on which L1 and L2 entries we reuse.
11452  *
11453  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
11454  *
11455  * 1.   Create L1 filter of outer vlan (12b) which will be in use
11456  *              later when we define the cloud filter.
11457  *      a.      Valid_flags.replace_cloud = 0
11458  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
11459  *      c.      New_filter = 0x10
11460  *      d.      TR bit = 0xff (optional, not used here)
11461  *      e.      Buffer – 2 entries:
11462  *              i.      Byte 0 = 8 (outer vlan FV index).
11463  *                      Byte 1 = 0 (rsv)
11464  *                      Byte 2-3 = 0x0fff
11465  *              ii.     Byte 0 = 37 (inner vlan FV index).
11466  *                      Byte 1 = 0 (rsv)
11467  *                      Byte 2-3 = 0x0fff
11468  *
11469  * Step 2:
11470  * 2.   Create cloud filter using two L1 filters entries: stag and
11471  *              new filter(outer vlan+ inner vlan)
11472  *      a.      Valid_flags.replace_cloud = 1
11473  *      b.      Old_filter = 1 (instead of outer IP)
11474  *      c.      New_filter = 0x10
11475  *      d.      Buffer – 2 entries:
11476  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
11477  *                      Byte 1-3 = 0 (rsv)
11478  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
11479  *                      Byte 9-11 = 0 (rsv)
11480  */
11481 static int
11482 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
11483 {
11484         int ret = -ENOTSUP;
11485         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
11486         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
11487         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11488
11489         /* Init */
11490         memset(&filter_replace, 0,
11491                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
11492         memset(&filter_replace_buf, 0,
11493                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
11494
11495         /* create L1 filter */
11496         filter_replace.old_filter_type =
11497                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
11498         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
11499         filter_replace.tr_bit = 0;
11500
11501         /* Prepare the buffer, 2 entries */
11502         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
11503         filter_replace_buf.data[0] |=
11504                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
11505         /* Field Vector 12b mask */
11506         filter_replace_buf.data[2] = 0xff;
11507         filter_replace_buf.data[3] = 0x0f;
11508         filter_replace_buf.data[4] =
11509                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
11510         filter_replace_buf.data[4] |=
11511                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
11512         /* Field Vector 12b mask */
11513         filter_replace_buf.data[6] = 0xff;
11514         filter_replace_buf.data[7] = 0x0f;
11515         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
11516                         &filter_replace_buf);
11517         if (ret != I40E_SUCCESS)
11518                 return ret;
11519
11520         /* Apply the second L2 cloud filter */
11521         memset(&filter_replace, 0,
11522                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
11523         memset(&filter_replace_buf, 0,
11524                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
11525
11526         /* create L2 filter, input for L2 filter will be L1 filter  */
11527         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
11528         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
11529         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
11530
11531         /* Prepare the buffer, 2 entries */
11532         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
11533         filter_replace_buf.data[0] |=
11534                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
11535         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
11536         filter_replace_buf.data[4] |=
11537                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
11538         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
11539                         &filter_replace_buf);
11540         return ret;
11541 }
11542
11543 RTE_INIT(i40e_init_log);
11544 static void
11545 i40e_init_log(void)
11546 {
11547         i40e_logtype_init = rte_log_register("pmd.i40e.init");
11548         if (i40e_logtype_init >= 0)
11549                 rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
11550         i40e_logtype_driver = rte_log_register("pmd.i40e.driver");
11551         if (i40e_logtype_driver >= 0)
11552                 rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
11553 }