/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"

/* Maximum number of MAC addresses */
#define I40E_NUM_MACADDR_MAX       64
#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

/* Default queue interrupt throttling time in microseconds */
#define I40E_ITR_INDEX_DEFAULT          0
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
                I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_GRST_MASK | \
                I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
                I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
                I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
                I40E_PFINT_ICR0_ENA_VFLR_MASK | \
                I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
        (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
        (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
        (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
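
/*
 * Illustrative use of the mask above (not driver code): rejecting an
 * RSS/hash request for a flow type the hardware cannot classify.
 *
 *     if (!(I40E_FLOW_TYPES & (1UL << flow_type)))
 *             return -EINVAL;
 */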

#define I40E_PTP_40GB_INCVAL  0x0199999999ULL
#define I40E_PTP_10GB_INCVAL  0x0333333333ULL
#define I40E_PTP_1GB_INCVAL   0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA  0x80000000
#define I40E_PRTTSYN_TSYNTYPE 0x0e000000

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
                                            uint16_t queue_id,
                                            uint8_t stat_idx,
                                            uint8_t is_rx);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id,
                                int on);
static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
                                      uint16_t queue,
                                      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                                       struct rte_eth_pfc_conf *pfc_conf);
static void i40e_macaddr_add(struct rte_eth_dev *dev,
                          struct ether_addr *mac_addr,
                          uint32_t index,
                          uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
                bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
                               uint32_t hireg,
                               uint32_t loreg,
                               bool offset_loaded,
                               uint64_t *offset,
                               uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw);
static void i40e_dev_interrupt_handler(
                __rte_unused struct rte_intr_handle *handle, void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
                                uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
                        uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
                        uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
                                                struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_ethertype_filter_set(struct i40e_pf *pf,
                        struct rte_eth_ethertype_filter *filter,
                        bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
                                enum rte_filter_type filter_type,
                                enum rte_filter_op filter_op,
                                void *arg);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct i40e_hw *hw);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp,
                                           uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp);

static const struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};
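
/*
 * Note: the table above is populated by re-including rte_pci_dev_ids.h
 * with RTE_PCI_DEV_ID_DECL_I40E defined, which expands to one
 * {vendor, device} entry per supported i40e device ID; the all-zero
 * entry terminates the list.
 */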

static const struct eth_dev_ops i40e_eth_dev_ops = {
        .dev_configure                = i40e_dev_configure,
        .dev_start                    = i40e_dev_start,
        .dev_stop                     = i40e_dev_stop,
        .dev_close                    = i40e_dev_close,
        .promiscuous_enable           = i40e_dev_promiscuous_enable,
        .promiscuous_disable          = i40e_dev_promiscuous_disable,
        .allmulticast_enable          = i40e_dev_allmulticast_enable,
        .allmulticast_disable         = i40e_dev_allmulticast_disable,
        .dev_set_link_up              = i40e_dev_set_link_up,
        .dev_set_link_down            = i40e_dev_set_link_down,
        .link_update                  = i40e_dev_link_update,
        .stats_get                    = i40e_dev_stats_get,
        .stats_reset                  = i40e_dev_stats_reset,
        .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
        .dev_infos_get                = i40e_dev_info_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
        .vlan_tpid_set                = i40e_vlan_tpid_set,
        .vlan_offload_set             = i40e_vlan_offload_set,
        .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
        .vlan_pvid_set                = i40e_vlan_pvid_set,
        .rx_queue_start               = i40e_dev_rx_queue_start,
        .rx_queue_stop                = i40e_dev_rx_queue_stop,
        .tx_queue_start               = i40e_dev_tx_queue_start,
        .tx_queue_stop                = i40e_dev_tx_queue_stop,
        .rx_queue_setup               = i40e_dev_rx_queue_setup,
        .rx_queue_release             = i40e_dev_rx_queue_release,
        .rx_queue_count               = i40e_dev_rx_queue_count,
        .rx_descriptor_done           = i40e_dev_rx_descriptor_done,
        .tx_queue_setup               = i40e_dev_tx_queue_setup,
        .tx_queue_release             = i40e_dev_tx_queue_release,
        .dev_led_on                   = i40e_dev_led_on,
        .dev_led_off                  = i40e_dev_led_off,
        .flow_ctrl_set                = i40e_flow_ctrl_set,
        .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
        .mac_addr_add                 = i40e_macaddr_add,
        .mac_addr_remove              = i40e_macaddr_remove,
        .reta_update                  = i40e_dev_rss_reta_update,
        .reta_query                   = i40e_dev_rss_reta_query,
        .rss_hash_update              = i40e_dev_rss_hash_update,
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_add               = i40e_dev_udp_tunnel_add,
        .udp_tunnel_del               = i40e_dev_udp_tunnel_del,
        .filter_ctrl                  = i40e_dev_filter_ctrl,
        .mirror_rule_set              = i40e_mirror_rule_set,
        .mirror_rule_reset            = i40e_mirror_rule_reset,
        .timesync_enable              = i40e_timesync_enable,
        .timesync_disable             = i40e_timesync_disable,
        .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
};

static struct eth_driver rte_i40e_pmd = {
        .pci_drv = {
                .name = "rte_i40e_pmd",
                .id_table = pci_id_i40e_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                        RTE_PCI_DRV_DETACHABLE,
        },
        .eth_dev_init = eth_i40e_dev_init,
        .eth_dev_uninit = eth_i40e_dev_uninit,
        .dev_private_size = sizeof(struct i40e_adapter),
};
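
/*
 * A short note on the flags above: RTE_PCI_DRV_NEED_MAPPING asks the EAL
 * to map the device BARs before init, RTE_PCI_DRV_INTR_LSC advertises
 * link-status-change interrupt support, and RTE_PCI_DRV_DETACHABLE marks
 * the port as safe to detach at runtime.
 */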

static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                     struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                      struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}
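
/*
 * Note on the two helpers above: struct rte_eth_link fits in 64 bits, so
 * a single rte_atomic64_cmpset() copies the whole link state atomically;
 * a zero return from cmpset means the destination changed between the
 * read and the swap, hence the -1. Illustrative caller pattern (not
 * driver code):
 *
 *     struct rte_eth_link link;
 *
 *     if (rte_i40e_dev_atomic_read_link_status(dev, &link) == 0 &&
 *         link.link_status)
 *             printf("port %u up at %u Mbps\n",
 *                    (unsigned)dev->data->port_id, link.link_speed);
 */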

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the [Poll Mode] Driver of PCI i40e devices.
 */
static int
rte_i40e_pmd_init(const char *name __rte_unused,
                  const char *params __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        rte_eth_driver_register(&rte_i40e_pmd);

        return 0;
}

static struct rte_driver rte_i40e_driver = {
        .type = PMD_PDEV,
        .init = rte_i40e_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40e_driver);

/*
 * Initialize registers for flexible payload, which should be set by NVM.
 * This should be removed from code once it is fixed in NVM.
 */
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif

static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
{
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);

        /* GLQF_PIT Registers */
        I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
        I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
        int ret;

        ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
                                I40E_FLOW_CONTROL_ETHERTYPE, flags,
                                pf->main_vsi_seid, 0,
                                TRUE, NULL, NULL);
        if (ret)
                PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control"
                                  " frames from VSIs.");
}

static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev;
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi;
        int ret;
        uint32_t len;
        uint8_t aq_fail = 0;

        PMD_INIT_FUNC_TRACE();

        dev->dev_ops = &i40e_eth_dev_ops;
        dev->rx_pkt_burst = i40e_recv_pkts;
        dev->tx_pkt_burst = i40e_xmit_pkts;

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                if (dev->data->scattered_rx)
                        dev->rx_pkt_burst = i40e_recv_scattered_pkts;
                return 0;
        }
        pci_dev = dev->pci_dev;
        pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        pf->adapter->eth_dev = dev;
        pf->dev_data = dev->data;

        hw->back = I40E_PF_TO_ADAPTER(pf);
        hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
        if (!hw->hw_addr) {
                PMD_INIT_LOG(ERR, "Hardware is not available, "
                             "as address is NULL");
                return -ENODEV;
        }

        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->bus.device = pci_dev->addr.devid;
        hw->bus.func = pci_dev->addr.function;
        hw->adapter_stopped = 0;

        /* Make sure all is clean before doing PF reset */
        i40e_clear_hw(hw);

        /* Initialize the hardware */
        i40e_hw_init(hw);

        /* Reset here to make sure all is clean for each PF */
        ret = i40e_pf_reset(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
                return ret;
        }

        /* Initialize the shared code (base driver) */
        ret = i40e_init_shared_code(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
                return ret;
        }

        /*
         * To work around the NVM issue, initialize registers
         * for flexible payload by software.
         * It should be removed once issues are fixed in NVM.
         */
        i40e_flex_payload_reg_init(hw);

        /* Initialize the parameters for adminq */
        i40e_init_adminq_parameter(hw);
        ret = i40e_init_adminq(hw);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
                return -EIO;
        }
        PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
                     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
                     hw->aq.api_maj_ver, hw->aq.api_min_ver,
                     ((hw->nvm.version >> 12) & 0xf),
                     ((hw->nvm.version >> 4) & 0xff),
                     (hw->nvm.version & 0xf), hw->nvm.eetrack);

        /* Disable LLDP */
        ret = i40e_aq_stop_lldp(hw, true, NULL);
        if (ret != I40E_SUCCESS) /* Its failure can be ignored */
                PMD_INIT_LOG(INFO, "Failed to stop lldp");

        /* Clear PXE mode */
        i40e_clear_pxe_mode(hw);

        /*
         * On X710, performance numbers are far from the expectation on recent
         * firmware versions. The fix for this issue may not be integrated in
         * the following firmware version, so the workaround in the software
         * driver is needed. It needs to modify the initial values of 3
         * internal-only registers. Note that the workaround can be removed
         * when it is fixed in firmware in the future.
         */
        i40e_configure_registers(hw);

        /* Get hw capabilities */
        ret = i40e_get_cap(hw);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
                goto err_get_capabilities;
        }

        /* Initialize parameters for PF */
        ret = i40e_pf_parameter_init(dev);
        if (ret != 0) {
                PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
                goto err_parameter_init;
        }

        /* Initialize the queue management */
        ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to init queue pool");
                goto err_qp_pool_init;
        }
        ret = i40e_res_pool_init(&pf->msix_pool, 1,
                                hw->func_caps.num_msix_vectors - 1);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
                goto err_msix_pool_init;
        }

        /* Initialize lan hmc */
        ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
                                hw->func_caps.num_rx_qp, 0, 0);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
                goto err_init_lan_hmc;
        }

        /* Configure lan hmc */
        ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
                goto err_configure_lan_hmc;
        }

        /* Get and check the mac address */
        i40e_get_mac_addr(hw, hw->mac.addr);
        if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "mac address is not valid");
                ret = -EIO;
                goto err_get_mac_addr;
        }
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.addr,
                        (struct ether_addr *) hw->mac.perm_addr);

        /* Disable flow control */
        hw->fc.requested_mode = I40E_FC_NONE;
        i40e_set_fc(hw, &aq_fail, TRUE);

        /* PF setup, which includes VSI setup */
        ret = i40e_pf_setup(pf);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
                goto err_setup_pf_switch;
        }

        vsi = pf->main_vsi;

        /* Disable double vlan by default */
        i40e_vsi_config_double_vlan(vsi, FALSE);

        if (!vsi->max_macaddrs)
                len = ETHER_ADDR_LEN;
        else
                len = ETHER_ADDR_LEN * vsi->max_macaddrs;

        /* Should be after VSI initialized */
        dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
        if (!dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR, "Failed to allocate memory "
                                        "for storing mac address");
                ret = -ENOMEM;
                goto err_mac_alloc;
        }
        ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
                                        &dev->data->mac_addrs[0]);

        /* initialize pf host driver to setup SRIOV resource if applicable */
        i40e_pf_host_init(dev);

        /* register callback func to eal lib */
        rte_intr_callback_register(&(pci_dev->intr_handle),
                i40e_dev_interrupt_handler, (void *)dev);

        /* configure and enable device interrupt */
        i40e_pf_config_irq0(hw);
        i40e_pf_enable_irq0(hw);

        /* enable uio intr after callback register */
        rte_intr_enable(&(pci_dev->intr_handle));
        /*
         * Add an ethertype filter to drop all flow control frames transmitted
         * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
         * frames to wire.
         */
        i40e_add_tx_flow_control_drop_filter(pf);

        /* initialize mirror rule list */
        TAILQ_INIT(&pf->mirror_list);

        return 0;

err_mac_alloc:
        i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
err_get_mac_addr:
err_configure_lan_hmc:
        (void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
        i40e_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
        i40e_res_pool_destroy(&pf->qp_pool);
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
        (void)i40e_shutdown_adminq(hw);

        return ret;
}

static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev;
        struct i40e_hw *hw;
        struct i40e_filter_control_settings settings;
        int ret;
        uint8_t aq_fail = 0;

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = dev->pci_dev;

        if (hw->adapter_stopped == 0)
                i40e_dev_close(dev);

        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;

        /* Disable LLDP */
        ret = i40e_aq_stop_lldp(hw, true, NULL);
        if (ret != I40E_SUCCESS) /* Its failure can be ignored */
                PMD_INIT_LOG(INFO, "Failed to stop lldp");

        /* Clear PXE mode */
        i40e_clear_pxe_mode(hw);

        /* Unconfigure filter control */
        memset(&settings, 0, sizeof(settings));
        ret = i40e_set_filter_control(hw, &settings);
        if (ret)
                PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
                                        ret);

        /* Disable flow control */
        hw->fc.requested_mode = I40E_FC_NONE;
        i40e_set_fc(hw, &aq_fail, TRUE);

        /* uninitialize pf host driver */
        i40e_pf_host_uninit(dev);

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        /* disable uio intr before callback unregister */
        rte_intr_disable(&(pci_dev->intr_handle));

        /* unregister callback func from eal lib */
        rte_intr_callback_unregister(&(pci_dev->intr_handle),
                i40e_dev_interrupt_handler, (void *)dev);

        return 0;
}

static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        int ret;

        if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
                ret = i40e_fdir_setup(pf);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Failed to setup flow director.");
                        return -ENOTSUP;
                }
                ret = i40e_fdir_configure(dev);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "failed to configure fdir.");
                        goto err;
                }
        } else
                i40e_fdir_teardown(pf);

        ret = i40e_dev_init_vlan(dev);
        if (ret < 0)
                goto err;

        /* VMDQ setup.
         *  VMDQ setting needs to stay out of i40e_pf_config_mq_rx(), as VMDQ
         *  and RSS settings have different requirements.
         *  The general PMD call sequence is NIC init, configure,
         *  rx/tx_queue_setup and dev_start (see the illustrative sequence
         *  after this function). rx/tx_queue_setup() looks up the VSI that a
         *  specific queue belongs to when VMDQ is applicable, so VMDQ setting
         *  has to be done before rx/tx_queue_setup(); this function is a good
         *  place for vmdq_setup.
         *  RSS setting needs the actual number of configured RX queues, which
         *  is only available after rx_queue_setup(), so dev_start() is a good
         *  place for RSS setup.
         */
        if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
                ret = i40e_vmdq_setup(dev);
                if (ret)
                        goto err;
        }
        return 0;
err:
        i40e_fdir_teardown(pf);
        return ret;
}
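
/*
 * Illustrative application call order that the comment above relies on
 * (standard ethdev usage, not driver code):
 *
 *     rte_eth_dev_configure(port, nb_rxq, nb_txq, &conf); // VMDQ set up here
 *     rte_eth_rx_queue_setup(port, q, ...);  // per-queue VSI lookup
 *     rte_eth_tx_queue_setup(port, q, ...);
 *     rte_eth_dev_start(port);               // RSS configured here
 */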

void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
        uint16_t i;

        for (i = 0; i < vsi->nb_qps; i++) {
                I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
                I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
                rte_wmb();
        }

        if (vsi->type != I40E_VSI_SRIOV) {
                I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
                I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
                                msix_vect - 1), 0);
        } else {
                uint32_t reg;
                reg = (hw->func_caps.num_msix_vectors_vf - 1) *
                        vsi->user_param + (msix_vect - 1);

                I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
        }
        I40E_WRITE_FLUSH(hw);
}

static inline uint16_t
i40e_calc_itr_interval(int16_t interval)
{
        if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
                interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;

        /* Convert to hardware count, as writing each 1 represents 2 us */
        return (interval / 2);
}
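
/*
 * Example (illustrative): the default 32 us throttling interval is
 * written to the ITR register as 16, since each hardware count stands
 * for 2 us; out-of-range requests fall back to the default.
 *
 *     i40e_calc_itr_interval(32);   // == 16
 *     i40e_calc_itr_interval(-1);   // == 16 (clamped to the 32 us default)
 */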

void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
        uint32_t val;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
        int i;

        for (i = 0; i < vsi->nb_qps; i++)
                I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);

        /* Bind all RX queues to allocated MSIX interrupt */
        for (i = 0; i < vsi->nb_qps; i++) {
                val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
                        I40E_QINT_RQCTL_ITR_INDX_MASK |
                        ((vsi->base_queue + i + 1) <<
                        I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
                        (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
                        I40E_QINT_RQCTL_CAUSE_ENA_MASK;

                if (i == vsi->nb_qps - 1)
                        val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
                I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
        }

        /* Write first RX queue to Link list register as the head element */
        if (vsi->type != I40E_VSI_SRIOV) {
                uint16_t interval =
                        i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);

                I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
                                                (vsi->base_queue <<
                                I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
                        (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));

                I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
                                                msix_vect - 1), interval);

#ifndef I40E_GLINT_CTL
#define I40E_GLINT_CTL                     0x0003F800
#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
#endif
                /* Disable auto-mask when enabling all non-zero interrupts */
                I40E_WRITE_REG(hw, I40E_GLINT_CTL,
                        I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
        } else {
                uint32_t reg;

                /* num_msix_vectors_vf needs to exclude irq0 */
                reg = (hw->func_caps.num_msix_vectors_vf - 1) *
                        vsi->user_param + (msix_vect - 1);

                I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
                                        I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
                                (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
        }

        I40E_WRITE_FLUSH(hw);
}

static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t interval = i40e_calc_itr_interval(
                        RTE_LIBRTE_I40E_ITR_INTERVAL);

        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
                                        I40E_PFINT_DYN_CTLN_INTENA_MASK |
                                        I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
                        (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
}

static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
}

static inline uint8_t
i40e_parse_link_speed(uint16_t eth_link_speed)
{
        uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;

        switch (eth_link_speed) {
        case ETH_LINK_SPEED_40G:
                link_speed = I40E_LINK_SPEED_40GB;
                break;
        case ETH_LINK_SPEED_20G:
                link_speed = I40E_LINK_SPEED_20GB;
                break;
        case ETH_LINK_SPEED_10G:
                link_speed = I40E_LINK_SPEED_10GB;
                break;
        case ETH_LINK_SPEED_1000:
                link_speed = I40E_LINK_SPEED_1GB;
                break;
        case ETH_LINK_SPEED_100:
                link_speed = I40E_LINK_SPEED_100MB;
                break;
        }

        return link_speed;
}
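
/*
 * Example (illustrative): the ethdev speed constants map onto the
 * admin-queue encoding, e.g.
 *
 *     i40e_parse_link_speed(ETH_LINK_SPEED_10G) == I40E_LINK_SPEED_10GB
 *
 * and any unlisted value yields I40E_LINK_SPEED_UNKNOWN.
 */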

static int
i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
{
        enum i40e_status_code status;
        struct i40e_aq_get_phy_abilities_resp phy_ab;
        struct i40e_aq_set_phy_config phy_conf;
        const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
                        I40E_AQ_PHY_FLAG_PAUSE_RX |
                        I40E_AQ_PHY_FLAG_LOW_POWER;
        const uint8_t advt = I40E_LINK_SPEED_40GB |
                        I40E_LINK_SPEED_10GB |
                        I40E_LINK_SPEED_1GB |
                        I40E_LINK_SPEED_100MB;
        int ret = -ENOTSUP;

        /* Skip it on 40G interfaces, as a workaround for the link issue */
        if (i40e_is_40G_device(hw->device_id))
                return I40E_SUCCESS;

        status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
                                              NULL);
        if (status)
                return ret;

        memset(&phy_conf, 0, sizeof(phy_conf));

        /* bits 0-2 use the values from get_phy_abilities_resp */
        abilities &= ~mask;
        abilities |= phy_ab.abilities & mask;

        /* update abilities and speed */
        if (abilities & I40E_AQ_PHY_AN_ENABLED)
                phy_conf.link_speed = advt;
        else
                phy_conf.link_speed = force_speed;

        phy_conf.abilities = abilities;

        /* use get_phy_abilities_resp value for the rest */
        phy_conf.phy_type = phy_ab.phy_type;
        phy_conf.eee_capability = phy_ab.eee_capability;
        phy_conf.eeer = phy_ab.eeer_val;
        phy_conf.low_power_ctrl = phy_ab.d3_lpan;

        PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
                    phy_ab.abilities, phy_ab.link_speed);
        PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
                    phy_conf.abilities, phy_conf.link_speed);

        status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
        if (status)
                return ret;

        return I40E_SUCCESS;
}

static int
i40e_apply_link_speed(struct rte_eth_dev *dev)
{
        uint8_t speed;
        uint8_t abilities = 0;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *conf = &dev->data->dev_conf;

        speed = i40e_parse_link_speed(conf->link_speed);
        abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
        if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
                abilities |= I40E_AQ_PHY_AN_ENABLED;
        else
                abilities |= I40E_AQ_PHY_LINK_ENABLED;

        return i40e_phy_conf_link(hw, abilities, speed);
}
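
/*
 * Illustrative summary of the two paths above: with
 * link_speed == ETH_LINK_SPEED_AUTONEG the PHY is configured with
 * I40E_AQ_PHY_AN_ENABLED and i40e_phy_conf_link() advertises every speed
 * in its `advt` set; with a fixed setting such as ETH_LINK_SPEED_10G,
 * autonegotiation is skipped and the forced I40E_LINK_SPEED_10GB is
 * programmed instead.
 */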

static int
i40e_dev_start(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *main_vsi = pf->main_vsi;
        int ret, i;

        hw->adapter_stopped = 0;

        if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
                (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
                PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
                             dev->data->dev_conf.link_duplex,
                             dev->data->port_id);
                return -EINVAL;
        }

        /* Initialize VSI */
        ret = i40e_dev_rxtx_init(pf);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
                goto err_up;
        }

        /* Map queues with MSIX interrupt */
        i40e_vsi_queues_bind_intr(main_vsi);
        i40e_vsi_enable_queues_intr(main_vsi);

        /* Map VMDQ VSI queues with MSIX interrupt */
        for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
                i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
                i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
        }

        /* enable FDIR MSIX interrupt */
        if (pf->fdir.fdir_vsi) {
                i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
                i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
        }

        /* Enable all queues which have been configured */
        ret = i40e_dev_switch_queues(pf, TRUE);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to enable VSI");
                goto err_up;
        }

        /* Enable receiving broadcast packets */
        ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
        if (ret != I40E_SUCCESS)
                PMD_DRV_LOG(INFO, "fail to set vsi broadcast");

        for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
                ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
                                                true, NULL);
                if (ret != I40E_SUCCESS)
                        PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
        }

        /* Apply link configuration */
        ret = i40e_apply_link_speed(dev);
        if (I40E_SUCCESS != ret) {
                PMD_DRV_LOG(ERR, "Fail to apply link setting");
                goto err_up;
        }

        return I40E_SUCCESS;

err_up:
        i40e_dev_switch_queues(pf, FALSE);
        i40e_dev_clear_queues(dev);

        return ret;
}

static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_vsi *main_vsi = pf->main_vsi;
        struct i40e_mirror_rule *p_mirror;
        int i;

        /* Disable all queues */
        i40e_dev_switch_queues(pf, FALSE);

        /* un-map queues with interrupt registers */
        i40e_vsi_disable_queues_intr(main_vsi);
        i40e_vsi_queues_unbind_intr(main_vsi);

        for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
                i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
                i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
        }

        /* disable FDIR MSIX interrupt */
        if (pf->fdir.fdir_vsi) {
                i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
                i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
        }
        /* Clear all queues and release memory */
        i40e_dev_clear_queues(dev);

        /* Set link down */
        i40e_dev_set_link_down(dev);

        /* Remove all mirror rules */
        while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
                TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
                rte_free(p_mirror);
        }
        pf->nb_mirror_rule = 0;
}

static void
i40e_dev_close(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t reg;
        int i;

        PMD_INIT_FUNC_TRACE();

        i40e_dev_stop(dev);
        hw->adapter_stopped = 1;
        i40e_dev_free_queues(dev);

        /* Disable interrupt */
        i40e_pf_disable_irq0(hw);
        rte_intr_disable(&(dev->pci_dev->intr_handle));

        /* shutdown and destroy the HMC */
        i40e_shutdown_lan_hmc(hw);

        /* release all the existing VSIs and VEBs */
        i40e_fdir_teardown(pf);
        i40e_vsi_release(pf->main_vsi);

        for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
                i40e_vsi_release(pf->vmdq[i].vsi);
                pf->vmdq[i].vsi = NULL;
        }

        rte_free(pf->vmdq);
        pf->vmdq = NULL;

        /* shutdown the adminq */
        i40e_aq_queue_shutdown(hw, true);
        i40e_shutdown_adminq(hw);

        i40e_res_pool_destroy(&pf->qp_pool);
        i40e_res_pool_destroy(&pf->msix_pool);

        /* force a PF reset to clean anything leftover */
        reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
        I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
                        (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
        I40E_WRITE_FLUSH(hw);
}

static void
i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        int status;

        status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
                                                        true, NULL);
        if (status != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");

        status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
                                                        TRUE, NULL);
        if (status != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
}

static void
i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        int status;

        status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
                                                        false, NULL);
        if (status != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");

        status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
                                                        false, NULL);
        if (status != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
}

static void
i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        int ret;

        ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
        if (ret != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
}

static void
i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        int ret;

        if (dev->data->promiscuous == 1)
                return; /* must remain in all_multicast mode */

        ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
                                vsi->seid, FALSE, NULL);
        if (ret != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
}

/*
 * Set device link up.
 */
static int
i40e_dev_set_link_up(struct rte_eth_dev *dev)
{
        /* re-apply link speed setting */
        return i40e_apply_link_speed(dev);
}

/*
 * Set device link down.
 */
static int
i40e_dev_set_link_down(struct rte_eth_dev *dev)
{
        uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
        uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        return i40e_phy_conf_link(hw, abilities, speed);
}

int
i40e_dev_link_update(struct rte_eth_dev *dev,
                     int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_link_status link_status;
        struct rte_eth_link link, old;
        int status;
        unsigned rep_cnt = MAX_REPEAT_TIME;

        memset(&link, 0, sizeof(link));
        memset(&old, 0, sizeof(old));
        memset(&link_status, 0, sizeof(link_status));
        rte_i40e_dev_atomic_read_link_status(dev, &old);

        do {
                /* Get link status information from hardware */
                status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
                if (status != I40E_SUCCESS) {
                        link.link_speed = ETH_LINK_SPEED_100;
                        link.link_duplex = ETH_LINK_FULL_DUPLEX;
                        PMD_DRV_LOG(ERR, "Failed to get link info");
                        goto out;
                }

                link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
                if (!wait_to_complete)
                        break;

                rte_delay_ms(CHECK_INTERVAL);
        } while (!link.link_status && rep_cnt--);

        if (!link.link_status)
                goto out;

        /* i40e uses full duplex only */
        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        /* Parse the link status */
        switch (link_status.link_speed) {
        case I40E_LINK_SPEED_100MB:
                link.link_speed = ETH_LINK_SPEED_100;
                break;
        case I40E_LINK_SPEED_1GB:
                link.link_speed = ETH_LINK_SPEED_1000;
                break;
        case I40E_LINK_SPEED_10GB:
                link.link_speed = ETH_LINK_SPEED_10G;
                break;
        case I40E_LINK_SPEED_20GB:
                link.link_speed = ETH_LINK_SPEED_20G;
                break;
        case I40E_LINK_SPEED_40GB:
                link.link_speed = ETH_LINK_SPEED_40G;
                break;
        default:
                link.link_speed = ETH_LINK_SPEED_100;
                break;
        }

out:
        rte_i40e_dev_atomic_write_link_status(dev, &link);
        if (link.link_status == old.link_status)
                return -1;

        return 0;
}
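
/*
 * Note on the polling above: with wait_to_complete set, link-up is
 * awaited for at most MAX_REPEAT_TIME * CHECK_INTERVAL = 1 s before the
 * current (down) state is reported. The return value follows the
 * link_update convention used by the PMDs of this era: -1 when the link
 * status is unchanged, 0 when it changed.
 */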

/* Get all the statistics of a VSI */
void
i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
        struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
        struct i40e_eth_stats *nes = &vsi->eth_stats;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);

        i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
                            vsi->offset_loaded, &oes->rx_bytes,
                            &nes->rx_bytes);
        i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
                            vsi->offset_loaded, &oes->rx_unicast,
                            &nes->rx_unicast);
        i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
                            vsi->offset_loaded, &oes->rx_multicast,
                            &nes->rx_multicast);
        i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
                            vsi->offset_loaded, &oes->rx_broadcast,
                            &nes->rx_broadcast);
        i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
                            &oes->rx_discards, &nes->rx_discards);
        /* GLV_REPC not supported */
        /* GLV_RMPC not supported */
        i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
                            &oes->rx_unknown_protocol,
                            &nes->rx_unknown_protocol);
        i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
                            vsi->offset_loaded, &oes->tx_bytes,
                            &nes->tx_bytes);
        i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
                            vsi->offset_loaded, &oes->tx_unicast,
                            &nes->tx_unicast);
        i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
                            vsi->offset_loaded, &oes->tx_multicast,
                            &nes->tx_multicast);
        i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
                            vsi->offset_loaded,  &oes->tx_broadcast,
                            &nes->tx_broadcast);
        /* GLV_TDPC not supported */
        i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
                            &oes->tx_errors, &nes->tx_errors);
        vsi->offset_loaded = true;

        PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
                    vsi->vsi_id);
        PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
        PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
        PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
        PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
        PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
        PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
                    nes->rx_unknown_protocol);
        PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
        PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
        PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
        PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
        PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
        PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
        PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
                    vsi->vsi_id);
}
1354
1355 /* Get all statistics of a port */
1356 static void
1357 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1358 {
1359         uint32_t i;
1360         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1361         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1362         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
1363         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
1364
1365         /* Get statistics of struct i40e_eth_stats */
1366         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
1367                             I40E_GLPRT_GORCL(hw->port),
1368                             pf->offset_loaded, &os->eth.rx_bytes,
1369                             &ns->eth.rx_bytes);
1370         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
1371                             I40E_GLPRT_UPRCL(hw->port),
1372                             pf->offset_loaded, &os->eth.rx_unicast,
1373                             &ns->eth.rx_unicast);
1374         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
1375                             I40E_GLPRT_MPRCL(hw->port),
1376                             pf->offset_loaded, &os->eth.rx_multicast,
1377                             &ns->eth.rx_multicast);
1378         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
1379                             I40E_GLPRT_BPRCL(hw->port),
1380                             pf->offset_loaded, &os->eth.rx_broadcast,
1381                             &ns->eth.rx_broadcast);
1382         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
1383                             pf->offset_loaded, &os->eth.rx_discards,
1384                             &ns->eth.rx_discards);
1385         /* GLPRT_REPC not supported */
1386         /* GLPRT_RMPC not supported */
1387         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
1388                             pf->offset_loaded,
1389                             &os->eth.rx_unknown_protocol,
1390                             &ns->eth.rx_unknown_protocol);
1391         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
1392                             I40E_GLPRT_GOTCL(hw->port),
1393                             pf->offset_loaded, &os->eth.tx_bytes,
1394                             &ns->eth.tx_bytes);
1395         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
1396                             I40E_GLPRT_UPTCL(hw->port),
1397                             pf->offset_loaded, &os->eth.tx_unicast,
1398                             &ns->eth.tx_unicast);
1399         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
1400                             I40E_GLPRT_MPTCL(hw->port),
1401                             pf->offset_loaded, &os->eth.tx_multicast,
1402                             &ns->eth.tx_multicast);
1403         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
1404                             I40E_GLPRT_BPTCL(hw->port),
1405                             pf->offset_loaded, &os->eth.tx_broadcast,
1406                             &ns->eth.tx_broadcast);
1407         /* GLPRT_TEPC not supported */
1408
1409         /* additional port specific stats */
1410         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
1411                             pf->offset_loaded, &os->tx_dropped_link_down,
1412                             &ns->tx_dropped_link_down);
1413         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
1414                             pf->offset_loaded, &os->crc_errors,
1415                             &ns->crc_errors);
1416         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
1417                             pf->offset_loaded, &os->illegal_bytes,
1418                             &ns->illegal_bytes);
1419         /* GLPRT_ERRBC not supported */
1420         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
1421                             pf->offset_loaded, &os->mac_local_faults,
1422                             &ns->mac_local_faults);
1423         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
1424                             pf->offset_loaded, &os->mac_remote_faults,
1425                             &ns->mac_remote_faults);
1426         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
1427                             pf->offset_loaded, &os->rx_length_errors,
1428                             &ns->rx_length_errors);
1429         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
1430                             pf->offset_loaded, &os->link_xon_rx,
1431                             &ns->link_xon_rx);
1432         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1433                             pf->offset_loaded, &os->link_xoff_rx,
1434                             &ns->link_xoff_rx);
1435         for (i = 0; i < 8; i++) {
1436                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1437                                     pf->offset_loaded,
1438                                     &os->priority_xon_rx[i],
1439                                     &ns->priority_xon_rx[i]);
1440                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1441                                     pf->offset_loaded,
1442                                     &os->priority_xoff_rx[i],
1443                                     &ns->priority_xoff_rx[i]);
1444         }
1445         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
1446                             pf->offset_loaded, &os->link_xon_tx,
1447                             &ns->link_xon_tx);
1448         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1449                             pf->offset_loaded, &os->link_xoff_tx,
1450                             &ns->link_xoff_tx);
1451         for (i = 0; i < 8; i++) {
1452                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1453                                     pf->offset_loaded,
1454                                     &os->priority_xon_tx[i],
1455                                     &ns->priority_xon_tx[i]);
1456                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1457                                     pf->offset_loaded,
1458                                     &os->priority_xoff_tx[i],
1459                                     &ns->priority_xoff_tx[i]);
1460                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1461                                     pf->offset_loaded,
1462                                     &os->priority_xon_2_xoff[i],
1463                                     &ns->priority_xon_2_xoff[i]);
1464         }
1465         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
1466                             I40E_GLPRT_PRC64L(hw->port),
1467                             pf->offset_loaded, &os->rx_size_64,
1468                             &ns->rx_size_64);
1469         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
1470                             I40E_GLPRT_PRC127L(hw->port),
1471                             pf->offset_loaded, &os->rx_size_127,
1472                             &ns->rx_size_127);
1473         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
1474                             I40E_GLPRT_PRC255L(hw->port),
1475                             pf->offset_loaded, &os->rx_size_255,
1476                             &ns->rx_size_255);
1477         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
1478                             I40E_GLPRT_PRC511L(hw->port),
1479                             pf->offset_loaded, &os->rx_size_511,
1480                             &ns->rx_size_511);
1481         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
1482                             I40E_GLPRT_PRC1023L(hw->port),
1483                             pf->offset_loaded, &os->rx_size_1023,
1484                             &ns->rx_size_1023);
1485         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
1486                             I40E_GLPRT_PRC1522L(hw->port),
1487                             pf->offset_loaded, &os->rx_size_1522,
1488                             &ns->rx_size_1522);
1489         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
1490                             I40E_GLPRT_PRC9522L(hw->port),
1491                             pf->offset_loaded, &os->rx_size_big,
1492                             &ns->rx_size_big);
1493         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
1494                             pf->offset_loaded, &os->rx_undersize,
1495                             &ns->rx_undersize);
1496         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
1497                             pf->offset_loaded, &os->rx_fragments,
1498                             &ns->rx_fragments);
1499         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
1500                             pf->offset_loaded, &os->rx_oversize,
1501                             &ns->rx_oversize);
1502         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
1503                             pf->offset_loaded, &os->rx_jabber,
1504                             &ns->rx_jabber);
1505         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
1506                             I40E_GLPRT_PTC64L(hw->port),
1507                             pf->offset_loaded, &os->tx_size_64,
1508                             &ns->tx_size_64);
1509         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
1510                             I40E_GLPRT_PTC127L(hw->port),
1511                             pf->offset_loaded, &os->tx_size_127,
1512                             &ns->tx_size_127);
1513         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
1514                             I40E_GLPRT_PTC255L(hw->port),
1515                             pf->offset_loaded, &os->tx_size_255,
1516                             &ns->tx_size_255);
1517         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
1518                             I40E_GLPRT_PTC511L(hw->port),
1519                             pf->offset_loaded, &os->tx_size_511,
1520                             &ns->tx_size_511);
1521         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
1522                             I40E_GLPRT_PTC1023L(hw->port),
1523                             pf->offset_loaded, &os->tx_size_1023,
1524                             &ns->tx_size_1023);
1525         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
1526                             I40E_GLPRT_PTC1522L(hw->port),
1527                             pf->offset_loaded, &os->tx_size_1522,
1528                             &ns->tx_size_1522);
1529         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
1530                             I40E_GLPRT_PTC9522L(hw->port),
1531                             pf->offset_loaded, &os->tx_size_big,
1532                             &ns->tx_size_big);
1533         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
1534                            pf->offset_loaded,
1535                            &os->fd_sb_match, &ns->fd_sb_match);
1536         /* GLPRT_MSPDC not supported */
1537         /* GLPRT_XEC not supported */
1538
1539         pf->offset_loaded = true;
1540
1541         if (pf->main_vsi)
1542                 i40e_update_vsi_stats(pf->main_vsi);
1543
1544         stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
1545                                                 ns->eth.rx_broadcast;
1546         stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
1547                                                 ns->eth.tx_broadcast;
1548         stats->ibytes   = ns->eth.rx_bytes;
1549         stats->obytes   = ns->eth.tx_bytes;
1550         stats->oerrors  = ns->eth.tx_errors;
1551         stats->imcasts  = ns->eth.rx_multicast;
1552         stats->fdirmatch = ns->fd_sb_match;
1553
1554         /* Rx Errors */
1555         stats->ibadcrc  = ns->crc_errors;
1556         stats->ibadlen  = ns->rx_length_errors + ns->rx_undersize +
1557                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
1558         stats->imissed  = ns->eth.rx_discards;
1559         stats->ierrors  = stats->ibadcrc + stats->ibadlen + stats->imissed;
1560
1561         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
1562         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
1563         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
1564         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
1565         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
1566         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
1567         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
1568                     ns->eth.rx_unknown_protocol);
1569         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
1570         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
1571         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
1572         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
1573         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
1574         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
1575
1576         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
1577                     ns->tx_dropped_link_down);
1578         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
1579         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
1580                     ns->illegal_bytes);
1581         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
1582         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
1583                     ns->mac_local_faults);
1584         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
1585                     ns->mac_remote_faults);
1586         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
1587                     ns->rx_length_errors);
1588         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
1589         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
1590         for (i = 0; i < 8; i++) {
1591                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
1592                                 i, ns->priority_xon_rx[i]);
1593                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
1594                                 i, ns->priority_xoff_rx[i]);
1595         }
1596         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
1597         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
1598         for (i = 0; i < 8; i++) {
1599                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
1600                                 i, ns->priority_xon_tx[i]);
1601                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
1602                                 i, ns->priority_xoff_tx[i]);
1603                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
1604                                 i, ns->priority_xon_2_xoff[i]);
1605         }
1606         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
1607         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
1608         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
1609         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
1610         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
1611         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
1612         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
1613         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
1614         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
1615         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
1616         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
1617         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
1618         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
1619         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
1620         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
1621         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
1622         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
1623         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
1624         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
1625                         ns->mac_short_packet_dropped);
1626         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
1627                     ns->checksum_error);
1628         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
1629         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
1630 }
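/* A minimal sketch of consuming this op from an application, via the
 * generic ethdev API (port_id is a placeholder for a configured port):
 *
 *      struct rte_eth_stats stats;
 *
 *      rte_eth_stats_get(port_id, &stats);
 *      printf("rx packets: %"PRIu64"\n", stats.ipackets);
 */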
1631
1632 /* Reset the statistics */
1633 static void
1634 i40e_dev_stats_reset(struct rte_eth_dev *dev)
1635 {
1636         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1637
        /* Force the start point (offset) of each counter to reload on next read */
1639         pf->offset_loaded = false;
1640 }
1641
1642 static int
1643 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
1644                                  __rte_unused uint16_t queue_id,
1645                                  __rte_unused uint8_t stat_idx,
1646                                  __rte_unused uint8_t is_rx)
1647 {
1648         PMD_INIT_FUNC_TRACE();
1649
1650         return -ENOSYS;
1651 }
1652
1653 static void
1654 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1655 {
1656         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1657         struct i40e_vsi *vsi = pf->main_vsi;
1658
1659         dev_info->max_rx_queues = vsi->nb_qps;
1660         dev_info->max_tx_queues = vsi->nb_qps;
1661         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1662         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1663         dev_info->max_mac_addrs = vsi->max_macaddrs;
1664         dev_info->max_vfs = dev->pci_dev->max_vfs;
1665         dev_info->rx_offload_capa =
1666                 DEV_RX_OFFLOAD_VLAN_STRIP |
1667                 DEV_RX_OFFLOAD_QINQ_STRIP |
1668                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1669                 DEV_RX_OFFLOAD_UDP_CKSUM |
1670                 DEV_RX_OFFLOAD_TCP_CKSUM;
1671         dev_info->tx_offload_capa =
1672                 DEV_TX_OFFLOAD_VLAN_INSERT |
1673                 DEV_TX_OFFLOAD_QINQ_INSERT |
1674                 DEV_TX_OFFLOAD_IPV4_CKSUM |
1675                 DEV_TX_OFFLOAD_UDP_CKSUM |
1676                 DEV_TX_OFFLOAD_TCP_CKSUM |
1677                 DEV_TX_OFFLOAD_SCTP_CKSUM |
1678                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1679                 DEV_TX_OFFLOAD_TCP_TSO;
1680         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
1681                                                 sizeof(uint32_t);
1682         dev_info->reta_size = pf->hash_lut_size;
1683         dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
1684
1685         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1686                 .rx_thresh = {
1687                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
1688                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
1689                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
1690                 },
1691                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
1692                 .rx_drop_en = 0,
1693         };
1694
1695         dev_info->default_txconf = (struct rte_eth_txconf) {
1696                 .tx_thresh = {
1697                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
1698                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
1699                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
1700                 },
1701                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
1702                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
1703                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
1704                                 ETH_TXQ_FLAGS_NOOFFLOADS,
1705         };
1706
1707         if (pf->flags & I40E_FLAG_VMDQ) {
1708                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
1709                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
1710                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
1711                                                 pf->max_nb_vmdq_vsi;
1712                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
1713                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
1714                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
1715         }
1716 }
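/* A minimal sketch of querying these capabilities from an application
 * (port_id is a placeholder for a configured port):
 *
 *      struct rte_eth_dev_info dev_info;
 *
 *      rte_eth_dev_info_get(port_id, &dev_info);
 *      if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
 *              ; // TSO is available on this port
 */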
1717
1718 static int
1719 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1720 {
1721         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1722         struct i40e_vsi *vsi = pf->main_vsi;
1723         PMD_INIT_FUNC_TRACE();
1724
1725         if (on)
1726                 return i40e_vsi_add_vlan(vsi, vlan_id);
1727         else
1728                 return i40e_vsi_delete_vlan(vsi, vlan_id);
1729 }
1730
1731 static void
1732 i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
1733                    __rte_unused uint16_t tpid)
1734 {
1735         PMD_INIT_FUNC_TRACE();
1736 }
1737
1738 static void
1739 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1740 {
1741         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1742         struct i40e_vsi *vsi = pf->main_vsi;
1743
1744         if (mask & ETH_VLAN_STRIP_MASK) {
1745                 /* Enable or disable VLAN stripping */
1746                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1747                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
1748                 else
1749                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
1750         }
1751
1752         if (mask & ETH_VLAN_EXTEND_MASK) {
1753                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1754                         i40e_vsi_config_double_vlan(vsi, TRUE);
1755                 else
1756                         i40e_vsi_config_double_vlan(vsi, FALSE);
1757         }
1758 }
1759
1760 static void
1761 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
1762                           __rte_unused uint16_t queue,
1763                           __rte_unused int on)
1764 {
1765         PMD_INIT_FUNC_TRACE();
1766 }
1767
1768 static int
1769 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1770 {
1771         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1772         struct i40e_vsi *vsi = pf->main_vsi;
1773         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1774         struct i40e_vsi_vlan_pvid_info info;
1775
1776         memset(&info, 0, sizeof(info));
1777         info.on = on;
1778         if (info.on)
1779                 info.config.pvid = pvid;
1780         else {
1781                 info.config.reject.tagged =
1782                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
1783                 info.config.reject.untagged =
1784                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
1785         }
1786
1787         return i40e_vsi_vlan_pvid_set(vsi, &info);
1788 }
1789
1790 static int
1791 i40e_dev_led_on(struct rte_eth_dev *dev)
1792 {
1793         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1794         uint32_t mode = i40e_led_get(hw);
1795
1796         if (mode == 0)
1797                 i40e_led_set(hw, 0xf, true); /* 0xf means led always true */
1798
1799         return 0;
1800 }
1801
1802 static int
1803 i40e_dev_led_off(struct rte_eth_dev *dev)
1804 {
1805         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1806         uint32_t mode = i40e_led_get(hw);
1807
1808         if (mode != 0)
1809                 i40e_led_set(hw, 0, false);
1810
1811         return 0;
1812 }
1813
1814 static int
1815 i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1816                    __rte_unused struct rte_eth_fc_conf *fc_conf)
1817 {
1818         PMD_INIT_FUNC_TRACE();
1819
1820         return -ENOSYS;
1821 }
1822
1823 static int
1824 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1825                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
1826 {
1827         PMD_INIT_FUNC_TRACE();
1828
1829         return -ENOSYS;
1830 }
1831
1832 /* Add a MAC address, and update filters */
1833 static void
1834 i40e_macaddr_add(struct rte_eth_dev *dev,
1835                  struct ether_addr *mac_addr,
1836                  __rte_unused uint32_t index,
1837                  uint32_t pool)
1838 {
1839         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1840         struct i40e_mac_filter_info mac_filter;
1841         struct i40e_vsi *vsi;
1842         int ret;
1843
1844         /* If VMDQ not enabled or configured, return */
        if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) {
                PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
                        pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
                        pool);
1849                 return;
1850         }
1851
1852         if (pool > pf->nb_cfg_vmdq_vsi) {
1853                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
1854                                 pool, pf->nb_cfg_vmdq_vsi);
1855                 return;
1856         }
1857
1858         (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
1859         mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
1860
1861         if (pool == 0)
1862                 vsi = pf->main_vsi;
1863         else
1864                 vsi = pf->vmdq[pool - 1].vsi;
1865
1866         ret = i40e_vsi_add_mac(vsi, &mac_filter);
1867         if (ret != I40E_SUCCESS) {
1868                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
1869                 return;
1870         }
1871 }
1872
1873 /* Remove a MAC address, and update filters */
1874 static void
1875 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1876 {
1877         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1878         struct i40e_vsi *vsi;
1879         struct rte_eth_dev_data *data = dev->data;
1880         struct ether_addr *macaddr;
1881         int ret;
1882         uint32_t i;
1883         uint64_t pool_sel;
1884
1885         macaddr = &(data->mac_addrs[index]);
1886
1887         pool_sel = dev->data->mac_pool_sel[index];
1888
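        /* mac_pool_sel is a per-address bitmap of the pools this MAC was
         * added to: bit 0 selects the main VSI, bit i (i > 0) selects
         * VMDQ pool i, matching the pool mapping in i40e_macaddr_add().
         */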
1889         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
1890                 if (pool_sel & (1ULL << i)) {
1891                         if (i == 0)
1892                                 vsi = pf->main_vsi;
1893                         else {
1894                                 /* No VMDQ pool enabled or configured */
                                if (!(pf->flags & I40E_FLAG_VMDQ) ||
1896                                         (i > pf->nb_cfg_vmdq_vsi)) {
1897                                         PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
1898                                                         "/configured");
1899                                         return;
1900                                 }
1901                                 vsi = pf->vmdq[i - 1].vsi;
1902                         }
1903                         ret = i40e_vsi_delete_mac(vsi, macaddr);
1904
1905                         if (ret) {
1906                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
1907                                 return;
1908                         }
1909                 }
1910         }
1911 }
1912
1913 /* Set perfect match or hash match of MAC and VLAN for a VF */
1914 static int
1915 i40e_vf_mac_filter_set(struct i40e_pf *pf,
1916                  struct rte_eth_mac_filter *filter,
1917                  bool add)
1918 {
1919         struct i40e_hw *hw;
1920         struct i40e_mac_filter_info mac_filter;
1921         struct ether_addr old_mac;
1922         struct ether_addr *new_mac;
1923         struct i40e_pf_vf *vf = NULL;
1924         uint16_t vf_id;
1925         int ret;
1926
1927         if (pf == NULL) {
1928                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
1929                 return -EINVAL;
1930         }
1931         hw = I40E_PF_TO_HW(pf);
1932
1933         if (filter == NULL) {
1934                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
1935                 return -EINVAL;
1936         }
1937
1938         new_mac = &filter->mac_addr;
1939
1940         if (is_zero_ether_addr(new_mac)) {
1941                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
1942                 return -EINVAL;
1943         }
1944
1945         vf_id = filter->dst_id;
1946
1947         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
1948                 PMD_DRV_LOG(ERR, "Invalid argument.");
1949                 return -EINVAL;
1950         }
1951         vf = &pf->vfs[vf_id];
1952
1953         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
1954                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
1955                 return -EINVAL;
1956         }
1957
1958         if (add) {
1959                 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
1960                 (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
1961                                 ETHER_ADDR_LEN);
1962                 (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
1963                                  ETHER_ADDR_LEN);
1964
1965                 mac_filter.filter_type = filter->filter_type;
1966                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
1967                 if (ret != I40E_SUCCESS) {
1968                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
1969                         return -1;
1970                 }
1971                 ether_addr_copy(new_mac, &pf->dev_addr);
1972         } else {
1973                 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
1974                                 ETHER_ADDR_LEN);
1975                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
1976                 if (ret != I40E_SUCCESS) {
1977                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
1978                         return -1;
1979                 }
1980
1981                 /* Clear device address as it has been removed */
1982                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
1983                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
1984         }
1985
1986         return 0;
1987 }
1988
1989 /* MAC filter handle */
1990 static int
1991 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
1992                 void *arg)
1993 {
1994         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1995         struct rte_eth_mac_filter *filter;
1996         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1997         int ret = I40E_NOT_SUPPORTED;
1998
1999         filter = (struct rte_eth_mac_filter *)(arg);
2000
2001         switch (filter_op) {
2002         case RTE_ETH_FILTER_NOP:
2003                 ret = I40E_SUCCESS;
2004                 break;
2005         case RTE_ETH_FILTER_ADD:
2006                 i40e_pf_disable_irq0(hw);
2007                 if (filter->is_vf)
2008                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
2009                 i40e_pf_enable_irq0(hw);
2010                 break;
2011         case RTE_ETH_FILTER_DELETE:
2012                 i40e_pf_disable_irq0(hw);
2013                 if (filter->is_vf)
2014                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
2015                 i40e_pf_enable_irq0(hw);
2016                 break;
2017         default:
2018                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
2019                 ret = I40E_ERR_PARAM;
2020                 break;
2021         }
2022
2023         return ret;
2024 }
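/* Illustrative application-side use (hypothetical values), through the
 * generic filter API that dispatches RTE_ETH_FILTER_MACVLAN to this
 * handler:
 *
 *      struct rte_eth_mac_filter filter = {
 *              .is_vf = 1,
 *              .dst_id = 0,    // target VF 0
 *              .filter_type = RTE_MACVLAN_PERFECT_MATCH,
 *      };
 *      // fill in filter.mac_addr before the call
 *      rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_MACVLAN,
 *                              RTE_ETH_FILTER_ADD, &filter);
 */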
2025
2026 static int
2027 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
2028                          struct rte_eth_rss_reta_entry64 *reta_conf,
2029                          uint16_t reta_size)
2030 {
2031         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2032         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2033         uint32_t lut, l;
2034         uint16_t i, j, lut_size = pf->hash_lut_size;
2035         uint16_t idx, shift;
2036         uint8_t mask;
2037
2038         if (reta_size != lut_size ||
2039                 reta_size > ETH_RSS_RETA_SIZE_512) {
                PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
                        "table (%d) doesn't match what the hardware "
                        "supports (%d)", reta_size, lut_size);
2043                 return -EINVAL;
2044         }
2045
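        /* Each 32-bit PFQF_HLUT register packs four one-byte LUT entries,
         * so walk the table four entries at a time; idx/shift locate the
         * 4-bit slice of the caller's 64-entry group mask for this step.
         */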
2046         for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
2047                 idx = i / RTE_RETA_GROUP_SIZE;
2048                 shift = i % RTE_RETA_GROUP_SIZE;
2049                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2050                                                 I40E_4_BIT_MASK);
2051                 if (!mask)
2052                         continue;
2053                 if (mask == I40E_4_BIT_MASK)
2054                         l = 0;
2055                 else
2056                         l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
2057                 for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
2058                         if (mask & (0x1 << j))
2059                                 lut |= reta_conf[idx].reta[shift + j] <<
2060                                                         (CHAR_BIT * j);
2061                         else
2062                                 lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
2063                 }
2064                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
2065         }
2066
2067         return 0;
2068 }
2069
2070 static int
2071 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
2072                         struct rte_eth_rss_reta_entry64 *reta_conf,
2073                         uint16_t reta_size)
2074 {
2075         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2076         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2077         uint32_t lut;
2078         uint16_t i, j, lut_size = pf->hash_lut_size;
2079         uint16_t idx, shift;
2080         uint8_t mask;
2081
2082         if (reta_size != lut_size ||
2083                 reta_size > ETH_RSS_RETA_SIZE_512) {
                PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
                        "table (%d) doesn't match what the hardware "
                        "supports (%d)", reta_size, lut_size);
2087                 return -EINVAL;
2088         }
2089
2090         for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
2091                 idx = i / RTE_RETA_GROUP_SIZE;
2092                 shift = i % RTE_RETA_GROUP_SIZE;
2093                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2094                                                 I40E_4_BIT_MASK);
2095                 if (!mask)
2096                         continue;
2097
2098                 lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
2099                 for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
2100                         if (mask & (0x1 << j))
2101                                 reta_conf[idx].reta[shift + j] = ((lut >>
2102                                         (CHAR_BIT * j)) & I40E_8_BIT_MASK);
2103                 }
2104         }
2105
2106         return 0;
2107 }
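/* A minimal sketch of updating one redirection entry from an application,
 * assuming the port reports a 512-entry table (reta_size must match
 * dev_info.reta_size; port_id and the queue value are placeholders):
 *
 *      struct rte_eth_rss_reta_entry64 reta_conf[8];   // 512 / 64 groups
 *
 *      memset(reta_conf, 0, sizeof(reta_conf));
 *      reta_conf[10 / RTE_RETA_GROUP_SIZE].mask =
 *              1ULL << (10 % RTE_RETA_GROUP_SIZE);     // touch entry 10 only
 *      reta_conf[10 / RTE_RETA_GROUP_SIZE].reta[10 % RTE_RETA_GROUP_SIZE] = 3;
 *      rte_eth_dev_rss_reta_update(port_id, reta_conf, 512);
 */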
2108
2109 /**
2110  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
2111  * @hw:   pointer to the HW structure
2112  * @mem:  pointer to mem struct to fill out
2113  * @size: size of memory requested
2114  * @alignment: what to align the allocation to
2115  **/
2116 enum i40e_status_code
2117 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2118                         struct i40e_dma_mem *mem,
2119                         u64 size,
2120                         u32 alignment)
2121 {
2122         static uint64_t id = 0;
2123         const struct rte_memzone *mz = NULL;
2124         char z_name[RTE_MEMZONE_NAMESIZE];
2125
2126         if (!mem)
2127                 return I40E_ERR_PARAM;
2128
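        /* Memzone names must be unique, so derive them from a monotonically
         * increasing id. Note the static counter is unsynchronized, which
         * assumes allocations happen during single-threaded device init.
         */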
2129         id++;
2130         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
2131 #ifdef RTE_LIBRTE_XEN_DOM0
2132         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
2133                                          alignment, RTE_PGSIZE_2M);
2134 #else
2135         mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY, 0,
2136                                          alignment);
2137 #endif
2138         if (!mz)
2139                 return I40E_ERR_NO_MEMORY;
2140
2141         mem->id = id;
2142         mem->size = size;
2143         mem->va = mz->addr;
2144 #ifdef RTE_LIBRTE_XEN_DOM0
2145         mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
2146 #else
2147         mem->pa = mz->phys_addr;
2148 #endif
2149
2150         return I40E_SUCCESS;
2151 }
2152
2153 /**
2154  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
2155  * @hw:   pointer to the HW structure
2156  * @mem:  ptr to mem struct to free
2157  **/
2158 enum i40e_status_code
2159 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2160                     struct i40e_dma_mem *mem)
2161 {
2162         if (!mem || !mem->va)
2163                 return I40E_ERR_PARAM;
2164
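        /* The backing memzone is deliberately not released here: memzones
         * cannot be freed in this DPDK version, so only the references are
         * cleared.
         */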
2165         mem->va = NULL;
2166         mem->pa = (u64)0;
2167
2168         return I40E_SUCCESS;
2169 }
2170
2171 /**
2172  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
2173  * @hw:   pointer to the HW structure
2174  * @mem:  pointer to mem struct to fill out
2175  * @size: size of memory requested
2176  **/
2177 enum i40e_status_code
2178 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2179                          struct i40e_virt_mem *mem,
2180                          u32 size)
2181 {
2182         if (!mem)
2183                 return I40E_ERR_PARAM;
2184
2185         mem->size = size;
2186         mem->va = rte_zmalloc("i40e", size, 0);
2187
2188         if (mem->va)
2189                 return I40E_SUCCESS;
2190         else
2191                 return I40E_ERR_NO_MEMORY;
2192 }
2193
2194 /**
2195  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
2196  * @hw:   pointer to the HW structure
2197  * @mem:  pointer to mem struct to free
2198  **/
2199 enum i40e_status_code
2200 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2201                      struct i40e_virt_mem *mem)
2202 {
2203         if (!mem)
2204                 return I40E_ERR_PARAM;
2205
2206         rte_free(mem->va);
2207         mem->va = NULL;
2208
2209         return I40E_SUCCESS;
2210 }
2211
2212 void
2213 i40e_init_spinlock_d(struct i40e_spinlock *sp)
2214 {
2215         rte_spinlock_init(&sp->spinlock);
2216 }
2217
2218 void
2219 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
2220 {
2221         rte_spinlock_lock(&sp->spinlock);
2222 }
2223
2224 void
2225 i40e_release_spinlock_d(struct i40e_spinlock *sp)
2226 {
2227         rte_spinlock_unlock(&sp->spinlock);
2228 }
2229
2230 void
2231 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
2232 {
2233         return;
2234 }
2235
2236 /**
2237  * Get the hardware capabilities, which will be parsed
2238  * and saved into struct i40e_hw.
2239  */
2240 static int
2241 i40e_get_cap(struct i40e_hw *hw)
2242 {
2243         struct i40e_aqc_list_capabilities_element_resp *buf;
2244         uint16_t len, size = 0;
2245         int ret;
2246
        /* Calculate a buffer large enough to hold the response data temporarily */
2248         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
2249                                                 I40E_MAX_CAP_ELE_NUM;
2250         buf = rte_zmalloc("i40e", len, 0);
2251         if (!buf) {
2252                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2253                 return I40E_ERR_NO_MEMORY;
2254         }
2255
        /* Get and parse the capabilities, then save them to hw */
2257         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
2258                         i40e_aqc_opc_list_func_capabilities, NULL);
2259         if (ret != I40E_SUCCESS)
2260                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
2261
        /* Free the temporary buffer after use */
2263         rte_free(buf);
2264
2265         return ret;
2266 }
2267
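/* A worked example of the queue/VSI budgeting below (hypothetical
 * capabilities): with lan_nb_qps = 16, 4 VFs at 4 queue pairs each and
 * one initial VMDQ VSI of 8 queue pairs, sum_queues = 16 + 16 + 8 = 40
 * and sum_vsis = 4 + 1 = 5 (the default VSI is not counted). If the HW
 * reports 128 RX queue pairs, the later VMDQ adjustment grows
 * max_nb_vmdq_vsi by min((128 - 40) / 8, max_num_vsi - 5).
 */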
2268 static int
2269 i40e_pf_parameter_init(struct rte_eth_dev *dev)
2270 {
2271         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2272         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2273         uint16_t sum_queues = 0, sum_vsis, left_queues;
2274
        /* First check if FW supports SR-IOV */
2276         if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
2277                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
2278                 return -EINVAL;
2279         }
2280
2281         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
2282         pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
2283         PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
2284         /* Allocate queues for pf */
2285         if (hw->func_caps.rss) {
2286                 pf->flags |= I40E_FLAG_RSS;
2287                 pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
2288                         (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
2289                 pf->lan_nb_qps = i40e_align_floor(pf->lan_nb_qps);
2290         } else
2291                 pf->lan_nb_qps = 1;
2292         sum_queues = pf->lan_nb_qps;
        /* The default VSI is not counted */
2294         sum_vsis = 0;
2295         PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
2296
2297         if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
2298                 pf->flags |= I40E_FLAG_SRIOV;
2299                 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
2300                 if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
                        PMD_INIT_LOG(ERR, "Configured VF number %u exceeds "
                                     "the max supported %u.",
2303                                      dev->pci_dev->max_vfs,
2304                                      hw->func_caps.num_vfs);
2305                         return -EINVAL;
2306                 }
2307                 if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
                        PMD_INIT_LOG(ERR, "VF queue pairs %u exceed "
                                     "the max supported %u.",
2310                                      pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
2311                         return -EINVAL;
2312                 }
2313                 pf->vf_num = dev->pci_dev->max_vfs;
2314                 sum_queues += pf->vf_nb_qps * pf->vf_num;
2315                 sum_vsis   += pf->vf_num;
2316                 PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
2317                              pf->vf_num, pf->vf_nb_qps);
2318         } else
2319                 pf->vf_num = 0;
2320
2321         if (hw->func_caps.vmdq) {
2322                 pf->flags |= I40E_FLAG_VMDQ;
2323                 pf->vmdq_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2324                 pf->max_nb_vmdq_vsi = 1;
                /*
                 * If VMDQ is available, assume a single VSI can be
                 * created; adjust later.
                 */
2329                 sum_queues += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
2330                 sum_vsis += pf->max_nb_vmdq_vsi;
2331         } else {
2332                 pf->vmdq_nb_qps = 0;
2333                 pf->max_nb_vmdq_vsi = 0;
2334         }
2335         pf->nb_cfg_vmdq_vsi = 0;
2336
2337         if (hw->func_caps.fd) {
2338                 pf->flags |= I40E_FLAG_FDIR;
2339                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
                /**
                 * The flow director consumes one VSI and one queue,
                 * but the exact usage cannot be predicted here.
                 */
2344         }
2345
2346         if (sum_vsis > pf->max_num_vsi ||
2347                 sum_queues > hw->func_caps.num_rx_qp) {
2348                 PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
2349                 PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
2350                              pf->max_num_vsi, sum_vsis);
2351                 PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
2352                              hw->func_caps.num_rx_qp, sum_queues);
2353                 return -EINVAL;
2354         }
2355
2356         /* Adjust VMDQ setting to support as many VMs as possible */
2357         if (pf->flags & I40E_FLAG_VMDQ) {
2358                 left_queues = hw->func_caps.num_rx_qp - sum_queues;
2359
2360                 pf->max_nb_vmdq_vsi += RTE_MIN(left_queues / pf->vmdq_nb_qps,
2361                                         pf->max_num_vsi - sum_vsis);
2362
                /* Limit the max VMDQ number to what rte_ether can support */
2364                 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
2365                                         ETH_64_POOLS - 1);
2366
2367                 PMD_INIT_LOG(INFO, "Max VMDQ VSI num:%u",
2368                                 pf->max_nb_vmdq_vsi);
2369                 PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
2370         }
2371
        /* Each VSI occupies at least one MSI-X interrupt; IRQ0 is
         * reserved for miscellaneous interrupt causes */
2374         if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
2375                 PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
2376                              sum_vsis, hw->func_caps.num_msix_vectors);
2377                 return -EINVAL;
2378         }
2379         return I40E_SUCCESS;
2380 }
2381
2382 static int
2383 i40e_pf_get_switch_config(struct i40e_pf *pf)
2384 {
2385         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2386         struct i40e_aqc_get_switch_config_resp *switch_config;
2387         struct i40e_aqc_switch_config_element_resp *element;
2388         uint16_t start_seid = 0, num_reported;
2389         int ret;
2390
        switch_config = (struct i40e_aqc_get_switch_config_resp *)
                        rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
2393         if (!switch_config) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
2395                 return -ENOMEM;
2396         }
2397
2398         /* Get the switch configurations */
2399         ret = i40e_aq_get_switch_config(hw, switch_config,
2400                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
2401         if (ret != I40E_SUCCESS) {
2402                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
2403                 goto fail;
2404         }
2405         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
2406         if (num_reported != 1) { /* The number should be 1 */
2407                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
2408                 goto fail;
2409         }
2410
2411         /* Parse the switch configuration elements */
2412         element = &(switch_config->element[0]);
2413         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
2414                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
2415                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
2416         } else
2417                 PMD_DRV_LOG(INFO, "Unknown element type");
2418
2419 fail:
2420         rte_free(switch_config);
2421
2422         return ret;
2423 }
2424
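/* Simple range allocator used for queue and MSI-X resources: free_list
 * holds disjoint [base, base + len) ranges kept sorted by base so that
 * adjacent ranges can be coalesced on free; alloc_list holds the ranges
 * handed out (unsorted). Allocation is best-fit with splitting.
 */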
2425 static int
i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
2427                         uint32_t num)
2428 {
2429         struct pool_entry *entry;
2430
2431         if (pool == NULL || num == 0)
2432                 return -EINVAL;
2433
2434         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
2435         if (entry == NULL) {
2436                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
2437                 return -ENOMEM;
2438         }
2439
        /* Initialize the queue heap */
2441         pool->num_free = num;
2442         pool->num_alloc = 0;
2443         pool->base = base;
2444         LIST_INIT(&pool->alloc_list);
2445         LIST_INIT(&pool->free_list);
2446
2447         /* Initialize element  */
2448         entry->base = 0;
2449         entry->len = num;
2450
2451         LIST_INSERT_HEAD(&pool->free_list, entry, next);
2452         return 0;
2453 }
2454
2455 static void
2456 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
2457 {
2458         struct pool_entry *entry;
2459
2460         if (pool == NULL)
2461                 return;
2462
        /* LIST_FOREACH would dereference freed entries; pop entries instead */
        while ((entry = LIST_FIRST(&pool->alloc_list))) {
                LIST_REMOVE(entry, next);
                rte_free(entry);
        }

        while ((entry = LIST_FIRST(&pool->free_list))) {
                LIST_REMOVE(entry, next);
                rte_free(entry);
        }
2472
2473         pool->num_free = 0;
2474         pool->num_alloc = 0;
2475         pool->base = 0;
2476         LIST_INIT(&pool->alloc_list);
2477         LIST_INIT(&pool->free_list);
2478 }
2479
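/* Merge example: with the free list holding [0,8) and [16,8), freeing the
 * range [8,8) first coalesces with [16,8) into [8,16), then with [0,8)
 * into a single [0,24) entry.
 */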
2480 static int
2481 i40e_res_pool_free(struct i40e_res_pool_info *pool,
2482                        uint32_t base)
2483 {
2484         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
2485         uint32_t pool_offset;
2486         int insert;
2487
2488         if (pool == NULL) {
2489                 PMD_DRV_LOG(ERR, "Invalid parameter");
2490                 return -EINVAL;
2491         }
2492
2493         pool_offset = base - pool->base;
2494         /* Lookup in alloc list */
2495         LIST_FOREACH(entry, &pool->alloc_list, next) {
2496                 if (entry->base == pool_offset) {
2497                         valid_entry = entry;
2498                         LIST_REMOVE(entry, next);
2499                         break;
2500                 }
2501         }
2502
        /* Not found, return */
2504         if (valid_entry == NULL) {
2505                 PMD_DRV_LOG(ERR, "Failed to find entry");
2506                 return -EINVAL;
2507         }
2508
        /**
         * Found it; move it to the free list and try to merge.
         * To make merging easier, the free list is always kept
         * sorted by base. Find the adjacent previous and next entries.
         */
2514         prev = next = NULL;
2515         LIST_FOREACH(entry, &pool->free_list, next) {
2516                 if (entry->base > valid_entry->base) {
2517                         next = entry;
2518                         break;
2519                 }
2520                 prev = entry;
2521         }
2522
2523         insert = 0;
        /* Try to merge with the next entry */
        if (next != NULL) {
                /* Merge if the freed range ends where the next one begins */
2527                 if (valid_entry->base + valid_entry->len == next->base) {
2528                         next->base = valid_entry->base;
2529                         next->len += valid_entry->len;
2530                         rte_free(valid_entry);
2531                         valid_entry = next;
2532                         insert = 1;
2533                 }
2534         }
2535
2536         if (prev != NULL) {
2537                 /* Merge with previous one */
2538                 if (prev->base + prev->len == valid_entry->base) {
2539                         prev->len += valid_entry->len;
                        /* If already merged with the next one, remove that node */
2541                         if (insert == 1) {
2542                                 LIST_REMOVE(valid_entry, next);
2543                                 rte_free(valid_entry);
2544                         } else {
2545                                 rte_free(valid_entry);
2546                                 insert = 1;
2547                         }
2548                 }
2549         }
2550
        /* No entry to merge with, insert directly */
2552         if (insert == 0) {
2553                 if (prev != NULL)
2554                         LIST_INSERT_AFTER(prev, valid_entry, next);
2555                 else if (next != NULL)
2556                         LIST_INSERT_BEFORE(next, valid_entry, next);
2557                 else /* It's empty list, insert to head */
2558                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
2559         }
2560
2561         pool->num_free += valid_entry->len;
2562         pool->num_alloc -= valid_entry->len;
2563
2564         return 0;
2565 }
2566
2567 static int
2568 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
2569                        uint16_t num)
2570 {
2571         struct pool_entry *entry, *valid_entry;
2572
2573         if (pool == NULL || num == 0) {
2574                 PMD_DRV_LOG(ERR, "Invalid parameter");
2575                 return -EINVAL;
2576         }
2577
2578         if (pool->num_free < num) {
2579                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
2580                             num, pool->num_free);
2581                 return -ENOMEM;
2582         }
2583
2584         valid_entry = NULL;
        /* Look up the free list and find the best-fit entry */
2586         LIST_FOREACH(entry, &pool->free_list, next) {
2587                 if (entry->len >= num) {
                        /* Exact fit, use it */
2589                         if (entry->len == num) {
2590                                 valid_entry = entry;
2591                                 break;
2592                         }
2593                         if (valid_entry == NULL || valid_entry->len > entry->len)
2594                                 valid_entry = entry;
2595                 }
2596         }
2597
        /* No entry can satisfy the request, return */
2599         if (valid_entry == NULL) {
2600                 PMD_DRV_LOG(ERR, "No valid entry found");
2601                 return -ENOMEM;
2602         }
2603         /**
2604          * The entry has exactly the requested number of queues;
2605          * remove it from the free_list.
2606          */
2607         if (valid_entry->len == num) {
2608                 LIST_REMOVE(valid_entry, next);
2609         } else {
2610                 /**
2611                  * The entry has more queues than requested;
2612                  * create a new entry for the alloc_list and subtract
2613                  * the allocated base and length from the free_list entry.
2614                  */
2615                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
2616                 if (entry == NULL) {
2617                         PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2618                                     "resource pool");
2619                         return -ENOMEM;
2620                 }
2621                 entry->base = valid_entry->base;
2622                 entry->len = num;
2623                 valid_entry->base += num;
2624                 valid_entry->len -= num;
2625                 valid_entry = entry;
2626         }
2627
2628         /* Insert it into alloc list, not sorted */
2629         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
2630
2631         pool->num_free -= valid_entry->len;
2632         pool->num_alloc += valid_entry->len;
2633
2634         return (valid_entry->base + pool->base);
2635 }
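
     /*
      * Illustrative use of the resource pool (a sketch only, not part of
      * the driver flow): carve 4 queue pairs out of the PF queue-pair pool
      * and return them.
      *
      *     int base = i40e_res_pool_alloc(&pf->qp_pool, 4);
      *     if (base >= 0)
      *             i40e_res_pool_free(&pf->qp_pool, base);
      *
      * i40e_res_pool_alloc() returns the absolute base index of the range
      * (pool->base + entry base) on success, or a negative errno.
      */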
2636
2637 /**
2638  * bitmap_is_subset - Check whether src2 is a subset of src1
2639  **/
2640 static inline int
2641 bitmap_is_subset(uint8_t src1, uint8_t src2)
2642 {
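             /* (src1 ^ src2) holds the bits where the masks differ; masking
              * with src2 keeps only bits set in src2 but clear in src1, so
              * the result is zero iff src2 is a subset of src1.
              */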
2643         return !((src1 ^ src2) & src2);
2644 }
2645
2646 static int
2647 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2648 {
2649         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2650
2651         /* If DCB is not supported, only default TC is supported */
2652         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2653                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
2654                 return -EINVAL;
2655         }
2656
2657         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2658                 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
2659                             "HW support 0x%x", hw->func_caps.enabled_tcmap,
2660                             enabled_tcmap);
2661                 return -EINVAL;
2662         }
2663         return I40E_SUCCESS;
2664 }
2665
2666 int
2667 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2668                                 struct i40e_vsi_vlan_pvid_info *info)
2669 {
2670         struct i40e_hw *hw;
2671         struct i40e_vsi_context ctxt;
2672         uint8_t vlan_flags = 0;
2673         int ret;
2674
2675         if (vsi == NULL || info == NULL) {
2676                 PMD_DRV_LOG(ERR, "invalid parameters");
2677                 return I40E_ERR_PARAM;
2678         }
2679
2680         if (info->on) {
2681                 vsi->info.pvid = info->config.pvid;
2682                 /**
2683                  * If PVID insertion is enabled, only tagged packets
2684                  * are allowed to be sent out.
2685                  */
2686                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2687                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2688         } else {
2689                 vsi->info.pvid = 0;
2690                 if (info->config.reject.tagged == 0)
2691                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2692
2693                 if (info->config.reject.untagged == 0)
2694                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2695         }
2696         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2697                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
2698         vsi->info.port_vlan_flags |= vlan_flags;
2699         vsi->info.valid_sections =
2700                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2701         memset(&ctxt, 0, sizeof(ctxt));
2702         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2703         ctxt.seid = vsi->seid;
2704
2705         hw = I40E_VSI_TO_HW(vsi);
2706         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2707         if (ret != I40E_SUCCESS)
2708                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2709
2710         return ret;
2711 }
2712
2713 static int
2714 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2715 {
2716         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2717         int i, ret;
2718         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2719
2720         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2721         if (ret != I40E_SUCCESS)
2722                 return ret;
2723
2724         if (!vsi->seid) {
2725                 PMD_DRV_LOG(ERR, "seid not valid");
2726                 return -EINVAL;
2727         }
2728
2729         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2730         tc_bw_data.tc_valid_bits = enabled_tcmap;
2731         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2732                 tc_bw_data.tc_bw_credits[i] =
2733                         (enabled_tcmap & (1 << i)) ? 1 : 0;
2734
2735         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2736         if (ret != I40E_SUCCESS) {
2737                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
2738                 return ret;
2739         }
2740
2741         (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2742                                         sizeof(vsi->info.qs_handle));
2743         return I40E_SUCCESS;
2744 }
2745
2746 static int
2747 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2748                                  struct i40e_aqc_vsi_properties_data *info,
2749                                  uint8_t enabled_tcmap)
2750 {
2751         int ret, total_tc = 0, i;
2752         uint16_t qpnum_per_tc, bsf, qp_idx;
2753
2754         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2755         if (ret != I40E_SUCCESS)
2756                 return ret;
2757
2758         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2759                 if (enabled_tcmap & (1 << i))
2760                         total_tc++;
2761         vsi->enabled_tc = enabled_tcmap;
2762
2763         /* Number of queues per enabled TC */
2764         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
2765         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
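             /* qpnum_per_tc was rounded down to a power of two by
              * i40e_align_floor(), so rte_bsf32() yields log2 of the queue
              * count, which is how the TC mapping encodes queues per TC.
              */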
2766         bsf = rte_bsf32(qpnum_per_tc);
2767
2768         /* Adjust to the number of queues that can actually be used */
2769         vsi->nb_qps = qpnum_per_tc * total_tc;
2770
2771         /**
2772          * Configure the TC-to-queue mapping: each enabled TC is
2773          * allocated qpnum_per_tc queues, while disabled TCs are
2774          * served by the default queue.
2775          */
2776         qp_idx = 0;
2777         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2778                 if (vsi->enabled_tc & (1 << i)) {
2779                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2780                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2781                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2782                         qp_idx += qpnum_per_tc;
2783                 } else
2784                         info->tc_mapping[i] = 0;
2785         }
2786
2787         /* Associate queue number with VSI */
2788         if (vsi->type == I40E_VSI_SRIOV) {
2789                 info->mapping_flags |=
2790                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2791                 for (i = 0; i < vsi->nb_qps; i++)
2792                         info->queue_mapping[i] =
2793                                 rte_cpu_to_le_16(vsi->base_queue + i);
2794         } else {
2795                 info->mapping_flags |=
2796                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2797                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2798         }
2799         info->valid_sections |=
2800                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2801
2802         return I40E_SUCCESS;
2803 }
2804
2805 static int
2806 i40e_veb_release(struct i40e_veb *veb)
2807 {
2808         struct i40e_vsi *vsi;
2809         struct i40e_hw *hw;
2810
2811         if (veb == NULL || veb->associate_vsi == NULL)
2812                 return -EINVAL;
2813
2814         if (!TAILQ_EMPTY(&veb->head)) {
2815                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
2816                 return -EACCES;
2817         }
2818
2819         vsi = veb->associate_vsi;
2820         hw = I40E_VSI_TO_HW(vsi);
2821
2822         vsi->uplink_seid = veb->uplink_seid;
2823         i40e_aq_delete_element(hw, veb->seid, NULL);
2824         rte_free(veb);
2825         vsi->veb = NULL;
2826         return I40E_SUCCESS;
2827 }
2828
2829 /* Setup a veb */
2830 static struct i40e_veb *
2831 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2832 {
2833         struct i40e_veb *veb;
2834         int ret;
2835         struct i40e_hw *hw;
2836
2837         if (pf == NULL || vsi == NULL) {
2838                 PMD_DRV_LOG(ERR, "veb setup failed, "
2839                             "associated VSI shouldn't be NULL");
2840                 return NULL;
2841         }
2842         hw = I40E_PF_TO_HW(pf);
2843
2844         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2845         if (!veb) {
2846                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
2847                 goto fail;
2848         }
2849
2850         veb->associate_vsi = vsi;
2851         TAILQ_INIT(&veb->head);
2852         veb->uplink_seid = vsi->uplink_seid;
2853
2854         ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2855                 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2856
2857         if (ret != I40E_SUCCESS) {
2858                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
2859                             hw->aq.asq_last_status);
2860                 goto fail;
2861         }
2862
2863         /* get statistics index */
2864         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2865                                 &veb->stats_idx, NULL, NULL, NULL);
2866         if (ret != I40E_SUCCESS) {
2867                 PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
2868                             hw->aq.asq_last_status);
2869                 goto fail;
2870         }
2871
2872         /* Get VEB bandwidth, to be implemented */
2873         /* The associated vsi now binds to the VEB; set its uplink to this VEB */
2874         vsi->uplink_seid = veb->seid;
2875
2876         return veb;
2877 fail:
2878         rte_free(veb);
2879         return NULL;
2880 }
2881
2882 int
2883 i40e_vsi_release(struct i40e_vsi *vsi)
2884 {
2885         struct i40e_pf *pf;
2886         struct i40e_hw *hw;
2887         struct i40e_vsi_list *vsi_list;
2888         int ret;
2889         struct i40e_mac_filter *f;
2890
2891         if (!vsi)
2892                 return I40E_SUCCESS;
2893
2894         pf = I40E_VSI_TO_PF(vsi);
2895         hw = I40E_VSI_TO_HW(vsi);
2896
2897         /* VSI has children attached; release the children first */
2898         if (vsi->veb) {
2899                 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2900                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2901                                 return -1;
2902                         TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2903                 }
2904                 i40e_veb_release(vsi->veb);
2905         }
2906
2907         /* Remove all macvlan filters of the VSI */
2908         i40e_vsi_remove_all_macvlan_filter(vsi);
2909         TAILQ_FOREACH(f, &vsi->mac_list, next)
2910                 rte_free(f);
2911
2912         if (vsi->type != I40E_VSI_MAIN) {
2913                 /* Remove vsi from parent's sibling list */
2914                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2915                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
2916                         return I40E_ERR_PARAM;
2917                 }
2918                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2919                                 &vsi->sib_vsi_list, list);
2920
2921                 /* Remove the switch element of the VSI */
2922                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2923                 if (ret != I40E_SUCCESS)
2924                         PMD_DRV_LOG(ERR, "Failed to delete element");
2925         }
2926         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2927
2928         if (vsi->type != I40E_VSI_SRIOV)
2929                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2930         rte_free(vsi);
2931
2932         return I40E_SUCCESS;
2933 }
2934
2935 static int
2936 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2937 {
2938         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2939         struct i40e_aqc_remove_macvlan_element_data def_filter;
2940         struct i40e_mac_filter_info filter;
2941         int ret;
2942
2943         if (vsi->type != I40E_VSI_MAIN)
2944                 return I40E_ERR_CONFIG;
2945         memset(&def_filter, 0, sizeof(def_filter));
2946         (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2947                                         ETH_ADDR_LEN);
2948         def_filter.vlan_tag = 0;
2949         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2950                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2951         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2952         if (ret != I40E_SUCCESS) {
2953                 struct i40e_mac_filter *f;
2954                 struct ether_addr *mac;
2955
2956                 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2957                             "macvlan filter");
2958                 /* Add the permanent mac into the mac list instead */
2959                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2960                 if (f == NULL) {
2961                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2962                         return I40E_ERR_NO_MEMORY;
2963                 }
2964                 mac = &f->mac_info.mac_addr;
2965                 (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
2966                                 ETH_ADDR_LEN);
2967                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2968                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2969                 vsi->mac_num++;
2970
2971                 return ret;
2972         }
2973         (void)rte_memcpy(&filter.mac_addr,
2974                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
2975         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2976         return i40e_vsi_add_mac(vsi, &filter);
2977 }
2978
2979 static int
2980 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2981 {
2982         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2983         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2984         struct i40e_hw *hw = &vsi->adapter->hw;
2985         i40e_status ret;
2986         int i;
2987
2988         memset(&bw_config, 0, sizeof(bw_config));
2989         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2990         if (ret != I40E_SUCCESS) {
2991                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
2992                             hw->aq.asq_last_status);
2993                 return ret;
2994         }
2995
2996         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2997         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2998                                         &ets_sla_config, NULL);
2999         if (ret != I40E_SUCCESS) {
3000                 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
3001                             "configuration %u", hw->aq.asq_last_status);
3002                 return ret;
3003         }
3004
3005         /* Don't store the info yet, just print it out */
3006         PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
3007         PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
3008         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3009                 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
3010                             ets_sla_config.share_credits[i]);
3011                 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
3012                             rte_le_to_cpu_16(ets_sla_config.credits[i]));
3013                 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
3014                             rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
3015                             (i * 4));
3016         }
3017
3018         return 0;
3019 }
3020
3021 /* Setup a VSI */
3022 struct i40e_vsi *
3023 i40e_vsi_setup(struct i40e_pf *pf,
3024                enum i40e_vsi_type type,
3025                struct i40e_vsi *uplink_vsi,
3026                uint16_t user_param)
3027 {
3028         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3029         struct i40e_vsi *vsi;
3030         struct i40e_mac_filter_info filter;
3031         int ret;
3032         struct i40e_vsi_context ctxt;
3033         struct ether_addr broadcast =
3034                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
3035
3036         if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
3037                 PMD_DRV_LOG(ERR, "VSI setup failed, "
3038                             "VSI link shouldn't be NULL");
3039                 return NULL;
3040         }
3041
3042         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
3043                 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
3044                             "uplink VSI should be NULL");
3045                 return NULL;
3046         }
3047
3048         /* If the uplink vsi hasn't set up a VEB yet, create one first */
3049         if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
3050                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
3051
3052                 if (uplink_vsi->veb == NULL) {
3053                         PMD_DRV_LOG(ERR, "VEB setup failed");
3054                         return NULL;
3055                 }
3056         }
3057
3058         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
3059         if (!vsi) {
3060                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
3061                 return NULL;
3062         }
3063         TAILQ_INIT(&vsi->mac_list);
3064         vsi->type = type;
3065         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
3066         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
3067         vsi->parent_vsi = uplink_vsi;
3068         vsi->user_param = user_param;
3069         /* Allocate queues */
3070         switch (vsi->type) {
3071         case I40E_VSI_MAIN:
3072                 vsi->nb_qps = pf->lan_nb_qps;
3073                 break;
3074         case I40E_VSI_SRIOV:
3075                 vsi->nb_qps = pf->vf_nb_qps;
3076                 break;
3077         case I40E_VSI_VMDQ2:
3078                 vsi->nb_qps = pf->vmdq_nb_qps;
3079                 break;
3080         case I40E_VSI_FDIR:
3081                 vsi->nb_qps = pf->fdir_nb_qps;
3082                 break;
3083         default:
3084                 goto fail_mem;
3085         }
3086         /*
3087          * The filter status descriptor is reported in rx queue 0,
3088          * while the tx queue for fdir filter programming has no such
3089          * constraint and can be any non-zero queue.
3090          * To keep it simple, the FDIR vsi uses queue pair 0.
3091          * To guarantee that, queue pair 0 must be allocated before
3092          * this function is called.
3093          */
3094         if (type != I40E_VSI_FDIR) {
3095                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
3096                 if (ret < 0) {
3097                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
3098                                     vsi->seid, ret);
3099                         goto fail_mem;
3100                 }
3101                 vsi->base_queue = ret;
3102         } else
3103                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
3104
3105         /* VF has MSIX interrupt in VF range, don't allocate here */
3106         if (type != I40E_VSI_SRIOV) {
3107                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
3108                 if (ret < 0) {
3109                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
3110                         goto fail_queue_alloc;
3111                 }
3112                 vsi->msix_intr = ret;
3113         } else
3114                 vsi->msix_intr = 0;
3115         /* Add VSI */
3116         if (type == I40E_VSI_MAIN) {
3117                 /* For main VSI, no need to add since it's default one */
3118                 vsi->uplink_seid = pf->mac_seid;
3119                 vsi->seid = pf->main_vsi_seid;
3120                 /* Bind queues with specific MSIX interrupt */
3121                 /**
3122                  * At least 2 interrupts are needed: one for the misc
3123                  * cause, enabled from the OS side, and another bound to
3124                  * the queues from the device side only.
3125                  */
3126
3127                 /* Get default VSI parameters from hardware */
3128                 memset(&ctxt, 0, sizeof(ctxt));
3129                 ctxt.seid = vsi->seid;
3130                 ctxt.pf_num = hw->pf_id;
3131                 ctxt.uplink_seid = vsi->uplink_seid;
3132                 ctxt.vf_num = 0;
3133                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
3134                 if (ret != I40E_SUCCESS) {
3135                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
3136                         goto fail_msix_alloc;
3137                 }
3138                 (void)rte_memcpy(&vsi->info, &ctxt.info,
3139                         sizeof(struct i40e_aqc_vsi_properties_data));
3140                 vsi->vsi_id = ctxt.vsi_number;
3141                 vsi->info.valid_sections = 0;
3142
3143                 /* Configure TC bandwidth; only TC0 is enabled */
3144                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
3145                         I40E_SUCCESS) {
3146                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
3147                         goto fail_msix_alloc;
3148                 }
3149
3150                 /* TC, queue mapping */
3151                 memset(&ctxt, 0, sizeof(ctxt));
3152                 vsi->info.valid_sections |=
3153                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3154                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3155                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3156                 (void)rte_memcpy(&ctxt.info, &vsi->info,
3157                         sizeof(struct i40e_aqc_vsi_properties_data));
3158                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3159                                                 I40E_DEFAULT_TCMAP);
3160                 if (ret != I40E_SUCCESS) {
3161                         PMD_DRV_LOG(ERR, "Failed to configure "
3162                                     "TC queue mapping");
3163                         goto fail_msix_alloc;
3164                 }
3165                 ctxt.seid = vsi->seid;
3166                 ctxt.pf_num = hw->pf_id;
3167                 ctxt.uplink_seid = vsi->uplink_seid;
3168                 ctxt.vf_num = 0;
3169
3170                 /* Update VSI parameters */
3171                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3172                 if (ret != I40E_SUCCESS) {
3173                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
3174                         goto fail_msix_alloc;
3175                 }
3176
3177                 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
3178                                                 sizeof(vsi->info.tc_mapping));
3179                 (void)rte_memcpy(&vsi->info.queue_mapping,
3180                                 &ctxt.info.queue_mapping,
3181                         sizeof(vsi->info.queue_mapping));
3182                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
3183                 vsi->info.valid_sections = 0;
3184
3185                 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
3186                                 ETH_ADDR_LEN);
3187
3188                 /**
3189                  * Updating the default filter settings is necessary to
3190                  * prevent reception of tagged packets.
3191                  * Some old firmware configurations load a default macvlan
3192                  * filter which accepts both tagged and untagged packets.
3193                  * The update replaces it with a normal filter if needed.
3194                  * For NVM 4.2.2 or later the update is not needed anymore,
3195                  * as firmware with correct configurations loads the
3196                  * expected default macvlan filter, which cannot be removed.
3197                  */
3198                 i40e_update_default_filter_setting(vsi);
3199                 i40e_config_qinq(hw, vsi);
3200         } else if (type == I40E_VSI_SRIOV) {
3201                 memset(&ctxt, 0, sizeof(ctxt));
3202                 /**
3203                  * For other VSIs, the uplink_seid equals the uplink VSI's
3204                  * uplink_seid since they share the same VEB.
3205                  */
3206                 vsi->uplink_seid = uplink_vsi->uplink_seid;
3207                 ctxt.pf_num = hw->pf_id;
3208                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
3209                 ctxt.uplink_seid = vsi->uplink_seid;
3210                 ctxt.connection_type = 0x1;
3211                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
3212
3213                 /**
3214                  * Do not configure the switch ID to enable VEB switching
3215                  * via I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB: on Fortville, if
3216                  * the source mac address of a packet sent from a VF is
3217                  * not listed in the VEB's mac table, the VEB switches
3218                  * the packet back to the VF. Enable this once the HW
3219                  * issue is fixed.
3220                  */
3221
3222                 /* Configure port/vlan */
3223                 ctxt.info.valid_sections |=
3224                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3225                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
3226                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3227                                                 I40E_DEFAULT_TCMAP);
3228                 if (ret != I40E_SUCCESS) {
3229                         PMD_DRV_LOG(ERR, "Failed to configure "
3230                                     "TC queue mapping");
3231                         goto fail_msix_alloc;
3232                 }
3233                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3234                 ctxt.info.valid_sections |=
3235                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3236                 /**
3237                  * Since the VSI is not created yet, only configure the
3238                  * parameters here; the vsi is added below.
3239                  */
3240
3241                 i40e_config_qinq(hw, vsi);
3242         } else if (type == I40E_VSI_VMDQ2) {
3243                 memset(&ctxt, 0, sizeof(ctxt));
3244                 /*
3245                  * For other VSIs, the uplink_seid equals the uplink VSI's
3246                  * uplink_seid since they share the same VEB.
3247                  */
3248                 vsi->uplink_seid = uplink_vsi->uplink_seid;
3249                 ctxt.pf_num = hw->pf_id;
3250                 ctxt.vf_num = 0;
3251                 ctxt.uplink_seid = vsi->uplink_seid;
3252                 ctxt.connection_type = 0x1;
3253                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
3254
3255                 ctxt.info.valid_sections |=
3256                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
3257                 /* user_param carries the flag to enable loopback */
3258                 if (user_param) {
3259                         ctxt.info.switch_id =
3260                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
3261                         ctxt.info.switch_id |=
3262                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
3263                 }
3264
3265                 /* Configure port/vlan */
3266                 ctxt.info.valid_sections |=
3267                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3268                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
3269                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3270                                                 I40E_DEFAULT_TCMAP);
3271                 if (ret != I40E_SUCCESS) {
3272                         PMD_DRV_LOG(ERR, "Failed to configure "
3273                                         "TC queue mapping");
3274                         goto fail_msix_alloc;
3275                 }
3276                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3277                 ctxt.info.valid_sections |=
3278                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3279         } else if (type == I40E_VSI_FDIR) {
3280                 memset(&ctxt, 0, sizeof(ctxt));
3281                 vsi->uplink_seid = uplink_vsi->uplink_seid;
3282                 ctxt.pf_num = hw->pf_id;
3283                 ctxt.vf_num = 0;
3284                 ctxt.uplink_seid = vsi->uplink_seid;
3285                 ctxt.connection_type = 0x1;     /* regular data port */
3286                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
3287                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3288                                                 I40E_DEFAULT_TCMAP);
3289                 if (ret != I40E_SUCCESS) {
3290                         PMD_DRV_LOG(ERR, "Failed to configure "
3291                                         "TC queue mapping.");
3292                         goto fail_msix_alloc;
3293                 }
3294                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3295                 ctxt.info.valid_sections |=
3296                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3297         } else {
3298                 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
3299                 goto fail_msix_alloc;
3300         }
3301
3302         if (vsi->type != I40E_VSI_MAIN) {
3303                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
3304                 if (ret != I40E_SUCCESS) {
3305                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
3306                                     hw->aq.asq_last_status);
3307                         goto fail_msix_alloc;
3308                 }
3309                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
3310                 vsi->info.valid_sections = 0;
3311                 vsi->seid = ctxt.seid;
3312                 vsi->vsi_id = ctxt.vsi_number;
3313                 vsi->sib_vsi_list.vsi = vsi;
3314                 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
3315                                 &vsi->sib_vsi_list, list);
3316         }
3317
3318         /* MAC/VLAN configuration */
3319         (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
3320         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3321
3322         ret = i40e_vsi_add_mac(vsi, &filter);
3323         if (ret != I40E_SUCCESS) {
3324                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3325                 goto fail_msix_alloc;
3326         }
3327
3328         /* Get VSI BW information */
3329         i40e_vsi_dump_bw_config(vsi);
3330         return vsi;
3331 fail_msix_alloc:
3332         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
3333 fail_queue_alloc:
3334         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
3335 fail_mem:
3336         rte_free(vsi);
3337         return NULL;
3338 }
3339
3340 /* Configure vlan stripping on or off */
3341 int
3342 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
3343 {
3344         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3345         struct i40e_vsi_context ctxt;
3346         uint8_t vlan_flags;
3347         int ret = I40E_SUCCESS;
3348
3349         /* Check if it has been already on or off */
3350         if (vsi->info.valid_sections &
3351                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
3352                 if (on) {
3353                         if ((vsi->info.port_vlan_flags &
3354                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
3355                                 return 0; /* already on */
3356                 } else {
3357                         if ((vsi->info.port_vlan_flags &
3358                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
3359                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
3360                                 return 0; /* already off */
3361                 }
3362         }
3363
3364         if (on)
3365                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3366         else
3367                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
3368         vsi->info.valid_sections =
3369                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3370         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
3371         vsi->info.port_vlan_flags |= vlan_flags;
3372         ctxt.seid = vsi->seid;
3373         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3374         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3375         if (ret)
3376                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
3377                             on ? "enable" : "disable");
3378
3379         return ret;
3380 }
3381
3382 static int
3383 i40e_dev_init_vlan(struct rte_eth_dev *dev)
3384 {
3385         struct rte_eth_dev_data *data = dev->data;
3386         int ret;
3387
3388         /* Apply vlan offload setting */
3389         i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
3390
3391         /* Apply double-vlan setting, not implemented yet */
3392
3393         /* Apply pvid setting */
3394         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
3395                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
3396         if (ret)
3397                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
3398
3399         return ret;
3400 }
3401
3402 static int
3403 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
3404 {
3405         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3406
3407         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
3408 }
3409
3410 static int
3411 i40e_update_flow_control(struct i40e_hw *hw)
3412 {
3413 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
3414         struct i40e_link_status link_status;
3415         uint32_t rxfc = 0, txfc = 0, reg;
3416         uint8_t an_info;
3417         int ret;
3418
3419         memset(&link_status, 0, sizeof(link_status));
3420         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
3421         if (ret != I40E_SUCCESS) {
3422                 PMD_DRV_LOG(ERR, "Failed to get link status information");
3423                 goto write_reg; /* Disable flow control */
3424         }
3425
3426         an_info = hw->phy.link_info.an_info;
3427         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
3428                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
3429                 ret = I40E_ERR_NOT_READY;
3430                 goto write_reg; /* Disable flow control */
3431         }
3432         /**
3433          * If link auto negotiation is enabled, flow control needs to
3434          * be configured according to it
3435          */
3436         switch (an_info & I40E_LINK_PAUSE_RXTX) {
3437         case I40E_LINK_PAUSE_RXTX:
3438                 rxfc = 1;
3439                 txfc = 1;
3440                 hw->fc.current_mode = I40E_FC_FULL;
3441                 break;
3442         case I40E_AQ_LINK_PAUSE_RX:
3443                 rxfc = 1;
3444                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
3445                 break;
3446         case I40E_AQ_LINK_PAUSE_TX:
3447                 txfc = 1;
3448                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
3449                 break;
3450         default:
3451                 hw->fc.current_mode = I40E_FC_NONE;
3452                 break;
3453         }
3454
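             /* On any failure above, rxfc and txfc are still 0, so falling
              * through to here disables flow control in both directions.
              */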
3455 write_reg:
3456         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
3457                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
3458         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3459         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
3460         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
3461         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
3462
3463         return ret;
3464 }
3465
3466 /* PF setup */
3467 static int
3468 i40e_pf_setup(struct i40e_pf *pf)
3469 {
3470         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3471         struct i40e_filter_control_settings settings;
3472         struct i40e_vsi *vsi;
3473         int ret;
3474
3475         /* Clear all stats counters */
3476         pf->offset_loaded = FALSE;
3477         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
3478         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
3479
3480         ret = i40e_pf_get_switch_config(pf);
3481         if (ret != I40E_SUCCESS) {
3482                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
3483                 return ret;
3484         }
3485         if (pf->flags & I40E_FLAG_FDIR) {
3486                 /* Allocate queues first so that FDIR uses queue pair 0 */
3487                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
3488                 if (ret != I40E_FDIR_QUEUE_ID) {
3489                         PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :"
3490                                     " ret =%d", ret);
3491                         pf->flags &= ~I40E_FLAG_FDIR;
3492                 }
3493         }
3494         /* Main VSI setup */
3495         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
3496         if (!vsi) {
3497                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
3498                 return I40E_ERR_NOT_READY;
3499         }
3500         pf->main_vsi = vsi;
3501
3502         /* Configure filter control */
3503         memset(&settings, 0, sizeof(settings));
3504         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
3505                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
3506         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
3507                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
3508         else {
3509                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
3510                                                 hw->func_caps.rss_table_size);
3511                 return I40E_ERR_PARAM;
3512         }
3513         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
3514                         "size: %u\n", hw->func_caps.rss_table_size);
3515         pf->hash_lut_size = hw->func_caps.rss_table_size;
3516
3517         /* Enable ethtype and macvlan filters */
3518         settings.enable_ethtype = TRUE;
3519         settings.enable_macvlan = TRUE;
3520         ret = i40e_set_filter_control(hw, &settings);
3521         if (ret)
3522                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
3523                                                                 ret);
3524
3525         /* Update flow control according to the auto negotiation */
3526         i40e_update_flow_control(hw);
3527
3528         return I40E_SUCCESS;
3529 }
3530
3531 int
3532 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3533 {
3534         uint32_t reg;
3535         uint16_t j;
3536
3537         /**
3538          * Set or clear the TX Queue Disable flag,
3539          * as required by hardware.
3540          */
3541         i40e_pre_tx_queue_cfg(hw, q_idx, on);
3542         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
3543
3544         /* Wait until any previous enable/disable request has completed */
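             /* The hardware updates QENA_STAT to match QENA_REQ once a
              * queue state transition completes, so equal bits mean no
              * transition is in flight.
              */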
3545         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3546                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3547                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3548                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3549                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
3550                                                         & 0x1))) {
3551                         break;
3552                 }
3553         }
3554         if (on) {
3555                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3556                         return I40E_SUCCESS; /* already on, skip next steps */
3557
3558                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
3559                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3560         } else {
3561                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3562                         return I40E_SUCCESS; /* already off, skip next steps */
3563                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3564         }
3565         /* Write the register */
3566         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
3567         /* Check the result */
3568         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3569                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3570                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3571                 if (on) {
3572                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3573                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
3574                                 break;
3575                 } else {
3576                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3577                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3578                                 break;
3579                 }
3580         }
3581         /* Check whether the operation timed out */
3582         if (j >= I40E_CHK_Q_ENA_COUNT) {
3583                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
3584                             (on ? "enable" : "disable"), q_idx);
3585                 return I40E_ERR_TIMEOUT;
3586         }
3587
3588         return I40E_SUCCESS;
3589 }
3590
3591 /* Switch on or off the tx queues */
3592 static int
3593 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
3594 {
3595         struct rte_eth_dev_data *dev_data = pf->dev_data;
3596         struct i40e_tx_queue *txq;
3597         struct rte_eth_dev *dev = pf->adapter->eth_dev;
3598         uint16_t i;
3599         int ret;
3600
3601         for (i = 0; i < dev_data->nb_tx_queues; i++) {
3602                 txq = dev_data->tx_queues[i];
3603                 /* Don't operate the queue if it is not configured, or
3604                  * if its start is deferred to per-queue control */
3605                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
3606                         continue;
3607                 if (on)
3608                         ret = i40e_dev_tx_queue_start(dev, i);
3609                 else
3610                         ret = i40e_dev_tx_queue_stop(dev, i);
3611                 if (ret != I40E_SUCCESS)
3612                         return ret;
3613         }
3614
3615         return I40E_SUCCESS;
3616 }
3617
3618 int
3619 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3620 {
3621         uint32_t reg;
3622         uint16_t j;
3623
3624         /* Wait until any previous enable/disable request has completed */
3625         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3626                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3627                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3628                 if (!(((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3629                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1)))
3630                         break;
3631         }
3632
3633         if (on) {
3634                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3635                         return I40E_SUCCESS; /* Already on, skip next steps */
3636                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3637         } else {
3638                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3639                         return I40E_SUCCESS; /* Already off, skip next steps */
3640                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3641         }
3642
3643         /* Write the register */
3644         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3645         /* Check the result */
3646         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3647                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3648                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3649                 if (on) {
3650                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3651                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
3652                                 break;
3653                 } else {
3654                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3655                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3656                                 break;
3657                 }
3658         }
3659
3660         /* Check whether the operation timed out */
3661         if (j >= I40E_CHK_Q_ENA_COUNT) {
3662                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
3663                             (on ? "enable" : "disable"), q_idx);
3664                 return I40E_ERR_TIMEOUT;
3665         }
3666
3667         return I40E_SUCCESS;
3668 }
3669 /* Switch on or off the rx queues */
3670 static int
3671 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
3672 {
3673         struct rte_eth_dev_data *dev_data = pf->dev_data;
3674         struct i40e_rx_queue *rxq;
3675         struct rte_eth_dev *dev = pf->adapter->eth_dev;
3676         uint16_t i;
3677         int ret;
3678
3679         for (i = 0; i < dev_data->nb_rx_queues; i++) {
3680                 rxq = dev_data->rx_queues[i];
3681                 /* Don't operate the queue if it is not configured, or
3682                  * if its start is deferred to per-queue control */
3683                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
3684                         continue;
3685                 if (on)
3686                         ret = i40e_dev_rx_queue_start(dev, i);
3687                 else
3688                         ret = i40e_dev_rx_queue_stop(dev, i);
3689                 if (ret != I40E_SUCCESS)
3690                         return ret;
3691         }
3692
3693         return I40E_SUCCESS;
3694 }
3695
3696 /* Switch on or off all the rx/tx queues */
3697 int
3698 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
3699 {
3700         int ret;
3701
3702         if (on) {
3703                 /* enable rx queues before enabling tx queues */
3704                 ret = i40e_dev_switch_rx_queues(pf, on);
3705                 if (ret) {
3706                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
3707                         return ret;
3708                 }
3709                 ret = i40e_dev_switch_tx_queues(pf, on);
3710         } else {
3711                 /* Stop tx queues before stopping rx queues */
3712                 ret = i40e_dev_switch_tx_queues(pf, on);
3713                 if (ret) {
3714                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
3715                         return ret;
3716                 }
3717                 ret = i40e_dev_switch_rx_queues(pf, on);
3718         }
3719
3720         return ret;
3721 }
3722
3723 /* Initialize VSI for TX */
3724 static int
3725 i40e_dev_tx_init(struct i40e_pf *pf)
3726 {
3727         struct rte_eth_dev_data *data = pf->dev_data;
3728         uint16_t i;
3729         uint32_t ret = I40E_SUCCESS;
3730         struct i40e_tx_queue *txq;
3731
3732         for (i = 0; i < data->nb_tx_queues; i++) {
3733                 txq = data->tx_queues[i];
3734                 if (!txq || !txq->q_set)
3735                         continue;
3736                 ret = i40e_tx_queue_init(txq);
3737                 if (ret != I40E_SUCCESS)
3738                         break;
3739         }
3740
3741         return ret;
3742 }
3743
3744 /* Initialize VSI for RX */
3745 static int
3746 i40e_dev_rx_init(struct i40e_pf *pf)
3747 {
3748         struct rte_eth_dev_data *data = pf->dev_data;
3749         int ret = I40E_SUCCESS;
3750         uint16_t i;
3751         struct i40e_rx_queue *rxq;
3752
3753         i40e_pf_config_mq_rx(pf);
3754         for (i = 0; i < data->nb_rx_queues; i++) {
3755                 rxq = data->rx_queues[i];
3756                 if (!rxq || !rxq->q_set)
3757                         continue;
3758
3759                 ret = i40e_rx_queue_init(rxq);
3760                 if (ret != I40E_SUCCESS) {
3761                         PMD_DRV_LOG(ERR, "Failed to do RX queue "
3762                                     "initialization");
3763                         break;
3764                 }
3765         }
3766
3767         return ret;
3768 }
3769
3770 static int
3771 i40e_dev_rxtx_init(struct i40e_pf *pf)
3772 {
3773         int err;
3774
3775         err = i40e_dev_tx_init(pf);
3776         if (err) {
3777                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
3778                 return err;
3779         }
3780         err = i40e_dev_rx_init(pf);
3781         if (err) {
3782                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
3783                 return err;
3784         }
3785
3786         return err;
3787 }
3788
3789 static int
3790 i40e_vmdq_setup(struct rte_eth_dev *dev)
3791 {
3792         struct rte_eth_conf *conf = &dev->data->dev_conf;
3793         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3794         int i, err, conf_vsis, j, loop;
3795         struct i40e_vsi *vsi;
3796         struct i40e_vmdq_info *vmdq_info;
3797         struct rte_eth_vmdq_rx_conf *vmdq_conf;
3798         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3799
3800         /*
3801          * Disable interrupts to avoid messages from VFs. This also
3802          * avoids race conditions during VSI creation/destruction.
3803          */
3804         i40e_pf_disable_irq0(hw);
3805
3806         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
3807                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
3808                 return -ENOTSUP;
3809         }
3810
3811         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
3812         if (conf_vsis > pf->max_nb_vmdq_vsi) {
3813                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
3814                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
3815                         pf->max_nb_vmdq_vsi);
3816                 return -ENOTSUP;
3817         }
3818
3819         if (pf->vmdq != NULL) {
3820                 PMD_INIT_LOG(INFO, "VMDQ already configured");
3821                 return 0;
3822         }
3823
3824         pf->vmdq = rte_zmalloc("vmdq_info_struct",
3825                                 sizeof(*vmdq_info) * conf_vsis, 0);
3826
3827         if (pf->vmdq == NULL) {
3828                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
3829                 return -ENOMEM;
3830         }
3831
3832         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
3833
3834         /* Create VMDQ VSI */
3835         for (i = 0; i < conf_vsis; i++) {
3836                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
3837                                 vmdq_conf->enable_loop_back);
3838                 if (vsi == NULL) {
3839                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
3840                         err = -1;
3841                         goto err_vsi_setup;
3842                 }
3843                 vmdq_info = &pf->vmdq[i];
3844                 vmdq_info->pf = pf;
3845                 vmdq_info->vsi = vsi;
3846         }
3847         pf->nb_cfg_vmdq_vsi = conf_vsis;
3848
3849         /* Configure VLANs */
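             /* 'loop' is the width in bits of the per-map pool bitmap
              * (pools is a uint64_t, so 64); each set bit selects one
              * VMDQ pool/VSI.
              */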
3850         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
3851         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
3852                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
3853                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
3854                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
3855                                         vmdq_conf->pool_map[i].vlan_id, j);
3856
3857                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
3858                                                 vmdq_conf->pool_map[i].vlan_id);
3859                                 if (err) {
3860                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
3861                                         err = -1;
3862                                         goto err_vsi_setup;
3863                                 }
3864                         }
3865                 }
3866         }
3867
3868         i40e_pf_enable_irq0(hw);
3869
3870         return 0;
3871
3872 err_vsi_setup:
3873         for (i = 0; i < conf_vsis; i++) {
3874                 if (pf->vmdq[i].vsi == NULL)
3875                         break;
3876                 i40e_vsi_release(pf->vmdq[i].vsi);
3877         }
3878
3879         rte_free(pf->vmdq);
3880         pf->vmdq = NULL;
3881         i40e_pf_enable_irq0(hw);
3882         return err;
3883 }
3884
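     /**
      * Read a 32-bit free-running HW counter and report it relative to the
      * offset snapshotted when the stats were last cleared, accounting for
      * a single counter wrap-around.
      **/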
3885 static void
3886 i40e_stat_update_32(struct i40e_hw *hw,
3887                    uint32_t reg,
3888                    bool offset_loaded,
3889                    uint64_t *offset,
3890                    uint64_t *stat)
3891 {
3892         uint64_t new_data;
3893
3894         new_data = (uint64_t)I40E_READ_REG(hw, reg);
3895         if (!offset_loaded)
3896                 *offset = new_data;
3897
3898         if (new_data >= *offset)
3899                 *stat = (uint64_t)(new_data - *offset);
3900         else
3901                 *stat = (uint64_t)((new_data +
3902                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
3903 }
3904
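     /**
      * Like i40e_stat_update_32() but for 48-bit counters split across a
      * high and a low 32-bit register; the result is masked to 48 bits
      * after wrap handling.
      **/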
3905 static void
3906 i40e_stat_update_48(struct i40e_hw *hw,
3907                    uint32_t hireg,
3908                    uint32_t loreg,
3909                    bool offset_loaded,
3910                    uint64_t *offset,
3911                    uint64_t *stat)
3912 {
3913         uint64_t new_data;
3914
3915         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3916         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3917                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
3918
3919         if (!offset_loaded)
3920                 *offset = new_data;
3921
3922         if (new_data >= *offset)
3923                 *stat = new_data - *offset;
3924         else
3925                 *stat = (uint64_t)((new_data +
3926                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
3927
3928         *stat &= I40E_48_BIT_MASK;
3929 }
3930
3931 /* Disable IRQ0 */
3932 void
3933 i40e_pf_disable_irq0(struct i40e_hw *hw)
3934 {
3935         /* Disable all interrupt types */
3936         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3937         I40E_WRITE_FLUSH(hw);
3938 }
3939
3940 /* Enable IRQ0 */
3941 void
3942 i40e_pf_enable_irq0(struct i40e_hw *hw)
3943 {
3944         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3945                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3946                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3947                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3948         I40E_WRITE_FLUSH(hw);
3949 }
3950
3951 static void
3952 i40e_pf_config_irq0(struct i40e_hw *hw)
3953 {
3954         /* Read any pending requests and disable interrupts first */
3955         i40e_pf_disable_irq0(hw);
3956         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
3957         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3958                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3959
3960         /* Link no queues with irq0 */
3961         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3962                 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3963 }
3964
3965 static void
3966 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3967 {
3968         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3969         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3970         int i;
3971         uint16_t abs_vf_id;
3972         uint32_t index, offset, val;
3973
3974         if (!pf->vfs)
3975                 return;
3976         /**
3977          * Try to find which VF triggered a reset; use the absolute VF id,
3978          * since the register is a global register.
3979          */
3980         for (i = 0; i < pf->vf_num; i++) {
3981                 abs_vf_id = hw->func_caps.vf_base_id + i;
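                /*
                 * VFLRSTAT is a bitmap with one bit per VF: 'index' selects
                 * the 32-bit register and 'offset' the bit within it.
                 */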
3982                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3983                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3984                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3985                 /* A VFR event occurred */
3986                 if (val & (0x1 << offset)) {
3987                         int ret;
3988
3989                         /* Clear the event first */
3990                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3991                                                         (0x1 << offset));
3992                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
3993                         /**
3994                          * Only notify that a VF reset event occurred;
3995                          * don't trigger another SW reset.
3996                          */
3997                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3998                         if (ret != I40E_SUCCESS)
3999                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
4000                 }
4001         }
4002 }
4003
4004 static void
4005 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
4006 {
4007         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4008         struct i40e_arq_event_info info;
4009         uint16_t pending, opcode;
4010         int ret;
4011
4012         info.buf_len = I40E_AQ_BUF_SZ;
4013         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
4014         if (!info.msg_buf) {
4015                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
4016                 return;
4017         }
4018
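        /*
         * Drain the admin receive queue: each i40e_clean_arq_element() call
         * pops one event into 'info' and reports via 'pending' whether more
         * events remain queued.
         */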
4019         pending = 1;
4020         while (pending) {
4021                 ret = i40e_clean_arq_element(hw, &info, &pending);
4022
4023                 if (ret != I40E_SUCCESS) {
4024                         PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
4025                                     "aq_err: %u", hw->aq.asq_last_status);
4026                         break;
4027                 }
4028                 opcode = rte_le_to_cpu_16(info.desc.opcode);
4029
4030                 switch (opcode) {
4031                 case i40e_aqc_opc_send_msg_to_pf:
4032                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
4033                         i40e_pf_host_handle_vf_msg(dev,
4034                                         rte_le_to_cpu_16(info.desc.retval),
4035                                         rte_le_to_cpu_32(info.desc.cookie_high),
4036                                         rte_le_to_cpu_32(info.desc.cookie_low),
4037                                         info.msg_buf,
4038                                         info.msg_len);
4039                         break;
4040                 default:
4041                         PMD_DRV_LOG(ERR, "Request %u is not supported yet",
4042                                     opcode);
4043                         break;
4044                 }
4045         }
4046         rte_free(info.msg_buf);
4047 }
4048
4049 /*
4050  * Interrupt handler registered as the alarm callback for handling the LSC
4051  * interrupt after a fixed delay, in order to let the NIC reach a stable
4052  * state. Currently i40e waits 1 sec for the link up interrupt; no delay
4053  * is needed for the link down interrupt.
4054  */
4055 static void
4056 i40e_dev_interrupt_delayed_handler(void *param)
4057 {
4058         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4059         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4060         uint32_t icr0;
4061
4062         /* read interrupt causes again */
4063         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
4064
4065 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
4066         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
4067                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
4068         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
4069                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
4070         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
4071                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
4072         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
4073                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
4074         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
4075                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
4076                                                                 "state");
4077         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
4078                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
4079         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
4080                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
4081 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
4082
4083         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4084                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
4085                 i40e_dev_handle_vfr_event(dev);
4086         }
4087         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4088                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
4089                 i40e_dev_handle_aq_msg(dev);
4090         }
4091
4092         /* handle the link up interrupt in an alarm callback */
4093         i40e_dev_link_update(dev, 0);
4094         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
4095
4096         i40e_pf_enable_irq0(hw);
4097         rte_intr_enable(&(dev->pci_dev->intr_handle));
4098 }
4099
4100 /**
4101  * Interrupt handler triggered by the NIC for handling
4102  * a specific interrupt.
4103  *
4104  * @param handle
4105  *  Pointer to interrupt handle.
4106  * @param param
4107  *  The address of the parameter (struct rte_eth_dev *) registered before.
4108  *
4109  * @return
4110  *  void
4111  */
4112 static void
4113 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
4114                            void *param)
4115 {
4116         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4117         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4118         uint32_t icr0;
4119
4120         /* Disable interrupt */
4121         i40e_pf_disable_irq0(hw);
4122
4123         /* read out interrupt causes */
4124         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
4125
4126         /* No interrupt event indicated */
4127         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
4128                 PMD_DRV_LOG(INFO, "No interrupt event");
4129                 goto done;
4130         }
4131 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
4132         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
4133                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
4134         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
4135                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
4136         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
4137                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
4138         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
4139                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
4140         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
4141                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
4142         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
4143                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
4144         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
4145                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
4146 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
4147
4148         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4149                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
4150                 i40e_dev_handle_vfr_event(dev);
4151         }
4152         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4153                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
4154                 i40e_dev_handle_aq_msg(dev);
4155         }
4156
4157         /* Link Status Change interrupt */
4158         if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
4159 #define I40E_US_PER_SECOND 1000000
4160                 struct rte_eth_link link;
4161
4162                 PMD_DRV_LOG(INFO, "ICR0: link status changed");
4163                 memset(&link, 0, sizeof(link));
4164                 rte_i40e_dev_atomic_read_link_status(dev, &link);
4165                 i40e_dev_link_update(dev, 0);
4166
4167                 /*
4168                  * For a link up interrupt, wait 1 second to let the
4169                  * hardware reach a stable state; otherwise several
4170                  * consecutive interrupts can be observed.
4171                  * For a link down interrupt, there is no need to wait.
4172                  */
4173                 if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
4174                         i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
4175                         return;
4176                 else
4177                         _rte_eth_dev_callback_process(dev,
4178                                 RTE_ETH_EVENT_INTR_LSC);
4179         }
4180
4181 done:
4182         /* Enable interrupt */
4183         i40e_pf_enable_irq0(hw);
4184         rte_intr_enable(&(dev->pci_dev->intr_handle));
4185 }
4186
4187 static int
4188 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
4189                          struct i40e_macvlan_filter *filter,
4190                          int total)
4191 {
4192         int ele_num, ele_buff_size;
4193         int num, actual_num, i;
4194         uint16_t flags;
4195         int ret = I40E_SUCCESS;
4196         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4197         struct i40e_aqc_add_macvlan_element_data *req_list;
4198
4199         if (filter == NULL || total == 0)
4200                 return I40E_ERR_PARAM;
4201         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
4202         ele_buff_size = hw->aq.asq_buf_size;
4203
4204         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
4205         if (req_list == NULL) {
4206                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
4207                 return I40E_ERR_NO_MEMORY;
4208         }
4209
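        /*
         * The AQ buffer can hold at most ele_num elements, so send the
         * 'total' filters in batches of up to ele_num per command.
         */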
4210         num = 0;
4211         do {
4212                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
4213                 memset(req_list, 0, ele_buff_size);
4214
4215                 for (i = 0; i < actual_num; i++) {
4216                         (void)rte_memcpy(req_list[i].mac_addr,
4217                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
4218                         req_list[i].vlan_tag =
4219                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
4220
4221                         switch (filter[num + i].filter_type) {
4222                         case RTE_MAC_PERFECT_MATCH:
4223                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
4224                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
4225                                 break;
4226                         case RTE_MACVLAN_PERFECT_MATCH:
4227                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
4228                                 break;
4229                         case RTE_MAC_HASH_MATCH:
4230                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
4231                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
4232                                 break;
4233                         case RTE_MACVLAN_HASH_MATCH:
4234                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
4235                                 break;
4236                         default:
4237                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
4238                                 ret = I40E_ERR_PARAM;
4239                                 goto DONE;
4240                         }
4241
4242                         req_list[i].queue_number = 0;
4243
4244                         req_list[i].flags = rte_cpu_to_le_16(flags);
4245                 }
4246
4247                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
4248                                                 actual_num, NULL);
4249                 if (ret != I40E_SUCCESS) {
4250                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
4251                         goto DONE;
4252                 }
4253                 num += actual_num;
4254         } while (num < total);
4255
4256 DONE:
4257         rte_free(req_list);
4258         return ret;
4259 }
4260
4261 static int
4262 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
4263                             struct i40e_macvlan_filter *filter,
4264                             int total)
4265 {
4266         int ele_num, ele_buff_size;
4267         int num, actual_num, i;
4268         uint16_t flags;
4269         int ret = I40E_SUCCESS;
4270         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4271         struct i40e_aqc_remove_macvlan_element_data *req_list;
4272
4273         if (filter == NULL || total == 0)
4274                 return I40E_ERR_PARAM;
4275
4276         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
4277         ele_buff_size = hw->aq.asq_buf_size;
4278
4279         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
4280         if (req_list == NULL) {
4281                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
4282                 return I40E_ERR_NO_MEMORY;
4283         }
4284
4285         num = 0;
4286         do {
4287                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
4288                 memset(req_list, 0, ele_buff_size);
4289
4290                 for (i = 0; i < actual_num; i++) {
4291                         (void)rte_memcpy(req_list[i].mac_addr,
4292                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
4293                         req_list[i].vlan_tag =
4294                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
4295
4296                         switch (filter[num + i].filter_type) {
4297                         case RTE_MAC_PERFECT_MATCH:
4298                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4299                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4300                                 break;
4301                         case RTE_MACVLAN_PERFECT_MATCH:
4302                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
4303                                 break;
4304                         case RTE_MAC_HASH_MATCH:
4305                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
4306                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4307                                 break;
4308                         case RTE_MACVLAN_HASH_MATCH:
4309                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
4310                                 break;
4311                         default:
4312                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
4313                                 ret = I40E_ERR_PARAM;
4314                                 goto DONE;
4315                         }
4316                         req_list[i].flags = rte_cpu_to_le_16(flags);
4317                 }
4318
4319                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
4320                                                 actual_num, NULL);
4321                 if (ret != I40E_SUCCESS) {
4322                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
4323                         goto DONE;
4324                 }
4325                 num += actual_num;
4326         } while (num < total);
4327
4328 DONE:
4329         rte_free(req_list);
4330         return ret;
4331 }
4332
4333 /* Find out specific MAC filter */
4334 static struct i40e_mac_filter *
4335 i40e_find_mac_filter(struct i40e_vsi *vsi,
4336                          struct ether_addr *macaddr)
4337 {
4338         struct i40e_mac_filter *f;
4339
4340         TAILQ_FOREACH(f, &vsi->mac_list, next) {
4341                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
4342                         return f;
4343         }
4344
4345         return NULL;
4346 }
4347
4348 static bool
4349 i40e_find_vlan_filter(struct i40e_vsi *vsi,
4350                          uint16_t vlan_id)
4351 {
4352         uint32_t vid_idx, vid_bit;
4353
4354         if (vlan_id > ETH_VLAN_ID_MAX)
4355                 return 0;
4356
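        /*
         * The VFTA is a bitmap with one bit per vlan id: I40E_VFTA_IDX()
         * selects the 32-bit word and I40E_VFTA_BIT() the bit within it.
         */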
4357         vid_idx = I40E_VFTA_IDX(vlan_id);
4358         vid_bit = I40E_VFTA_BIT(vlan_id);
4359
4360         if (vsi->vfta[vid_idx] & vid_bit)
4361                 return 1;
4362         else
4363                 return 0;
4364 }
4365
4366 static void
4367 i40e_set_vlan_filter(struct i40e_vsi *vsi,
4368                          uint16_t vlan_id, bool on)
4369 {
4370         uint32_t vid_idx, vid_bit;
4371
4372         if (vlan_id > ETH_VLAN_ID_MAX)
4373                 return;
4374
4375         vid_idx = I40E_VFTA_IDX(vlan_id);
4376         vid_bit = I40E_VFTA_BIT(vlan_id);
4377
4378         if (on)
4379                 vsi->vfta[vid_idx] |= vid_bit;
4380         else
4381                 vsi->vfta[vid_idx] &= ~vid_bit;
4382 }
4383
4384 /**
4385  * Find all vlan options for a specific mac addr,
4386  * filling mv_f with each vlan found.
4387  */
4388 static inline int
4389 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
4390                            struct i40e_macvlan_filter *mv_f,
4391                            int num, struct ether_addr *addr)
4392 {
4393         int i;
4394         uint32_t j, k;
4395
4396         /**
4397          * i40e_find_vlan_filter() is deliberately not used here to reduce
4398          * the loop time, although the code looks more complex.
4399          */
4400         if (num < vsi->vlan_num)
4401                 return I40E_ERR_PARAM;
4402
4403         i = 0;
4404         for (j = 0; j < I40E_VFTA_SIZE; j++) {
4405                 if (vsi->vfta[j]) {
4406                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
4407                                 if (vsi->vfta[j] & (1 << k)) {
4408                                         if (i > num - 1) {
4409                                                 PMD_DRV_LOG(ERR, "vlan number "
4410                                                             "does not match");
4411                                                 return I40E_ERR_PARAM;
4412                                         }
4413                                         (void)rte_memcpy(&mv_f[i].macaddr,
4414                                                         addr, ETH_ADDR_LEN);
4415                                         mv_f[i].vlan_id =
4416                                                 j * I40E_UINT32_BIT_SIZE + k;
4417                                         i++;
4418                                 }
4419                         }
4420                 }
4421         }
4422         return I40E_SUCCESS;
4423 }
4424
4425 static inline int
4426 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
4427                            struct i40e_macvlan_filter *mv_f,
4428                            int num,
4429                            uint16_t vlan)
4430 {
4431         int i = 0;
4432         struct i40e_mac_filter *f;
4433
4434         if (num < vsi->mac_num)
4435                 return I40E_ERR_PARAM;
4436
4437         TAILQ_FOREACH(f, &vsi->mac_list, next) {
4438                 if (i > num - 1) {
4439                         PMD_DRV_LOG(ERR, "buffer number does not match");
4440                         return I40E_ERR_PARAM;
4441                 }
4442                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
4443                                 ETH_ADDR_LEN);
4444                 mv_f[i].vlan_id = vlan;
4445                 mv_f[i].filter_type = f->mac_info.filter_type;
4446                 i++;
4447         }
4448
4449         return I40E_SUCCESS;
4450 }
4451
4452 static int
4453 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
4454 {
4455         int i, num;
4456         struct i40e_mac_filter *f;
4457         struct i40e_macvlan_filter *mv_f;
4458         int ret = I40E_SUCCESS;
4459
4460         if (vsi == NULL || vsi->mac_num == 0)
4461                 return I40E_ERR_PARAM;
4462
4463         /* Case that no vlan is set */
4464         if (vsi->vlan_num == 0)
4465                 num = vsi->mac_num;
4466         else
4467                 num = vsi->mac_num * vsi->vlan_num;
4468
4469         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
4470         if (mv_f == NULL) {
4471                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4472                 return I40E_ERR_NO_MEMORY;
4473         }
4474
4475         i = 0;
4476         if (vsi->vlan_num == 0) {
4477                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4478                         (void)rte_memcpy(&mv_f[i].macaddr,
4479                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
4480                         mv_f[i].vlan_id = 0;
4481                         i++;
4482                 }
4483         } else {
4484                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4485                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
4486                                         vsi->vlan_num, &f->mac_info.mac_addr);
4487                         if (ret != I40E_SUCCESS)
4488                                 goto DONE;
4489                         i += vsi->vlan_num;
4490                 }
4491         }
4492
4493         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
4494 DONE:
4495         rte_free(mv_f);
4496
4497         return ret;
4498 }
4499
4500 int
4501 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
4502 {
4503         struct i40e_macvlan_filter *mv_f;
4504         int mac_num;
4505         int ret = I40E_SUCCESS;
4506
4507         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
4508                 return I40E_ERR_PARAM;
4509
4510         /* If it's already set, just return */
4511         if (i40e_find_vlan_filter(vsi, vlan))
4512                 return I40E_SUCCESS;
4513
4514         mac_num = vsi->mac_num;
4515
4516         if (mac_num == 0) {
4517                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
4518                 return I40E_ERR_PARAM;
4519         }
4520
4521         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
4522
4523         if (mv_f == NULL) {
4524                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4525                 return I40E_ERR_NO_MEMORY;
4526         }
4527
4528         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
4529
4530         if (ret != I40E_SUCCESS)
4531                 goto DONE;
4532
4533         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
4534
4535         if (ret != I40E_SUCCESS)
4536                 goto DONE;
4537
4538         i40e_set_vlan_filter(vsi, vlan, 1);
4539
4540         vsi->vlan_num++;
4541         ret = I40E_SUCCESS;
4542 DONE:
4543         rte_free(mv_f);
4544         return ret;
4545 }
4546
4547 int
4548 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
4549 {
4550         struct i40e_macvlan_filter *mv_f;
4551         int mac_num;
4552         int ret = I40E_SUCCESS;
4553
4554         /**
4555          * Vlan 0 is the generic filter for untagged packets
4556          * and can't be removed.
4557          */
4558         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
4559                 return I40E_ERR_PARAM;
4560
4561         /* If can't find it, just return */
4562         if (!i40e_find_vlan_filter(vsi, vlan))
4563                 return I40E_ERR_PARAM;
4564
4565         mac_num = vsi->mac_num;
4566
4567         if (mac_num == 0) {
4568                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
4569                 return I40E_ERR_PARAM;
4570         }
4571
4572         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
4573
4574         if (mv_f == NULL) {
4575                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4576                 return I40E_ERR_NO_MEMORY;
4577         }
4578
4579         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
4580
4581         if (ret != I40E_SUCCESS)
4582                 goto DONE;
4583
4584         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
4585
4586         if (ret != I40E_SUCCESS)
4587                 goto DONE;
4588
4589         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
4590         if (vsi->vlan_num == 1) {
4591                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
4592                 if (ret != I40E_SUCCESS)
4593                         goto DONE;
4594
4595                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
4596                 if (ret != I40E_SUCCESS)
4597                         goto DONE;
4598         }
4599
4600         i40e_set_vlan_filter(vsi, vlan, 0);
4601
4602         vsi->vlan_num--;
4603         ret = I40E_SUCCESS;
4604 DONE:
4605         rte_free(mv_f);
4606         return ret;
4607 }
4608
4609 int
4610 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
4611 {
4612         struct i40e_mac_filter *f;
4613         struct i40e_macvlan_filter *mv_f;
4614         int i, vlan_num = 0;
4615         int ret = I40E_SUCCESS;
4616
4617         /* If the MAC filter has already been configured, just return */
4618         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
4619         if (f != NULL)
4620                 return I40E_SUCCESS;
4621         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
4622                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
4623
4624                 /**
4625                  * If vlan_num is 0, this is the first time to add a mac;
4626                  * set the mask for vlan_id 0.
4627                  */
4628                 if (vsi->vlan_num == 0) {
4629                         i40e_set_vlan_filter(vsi, 0, 1);
4630                         vsi->vlan_num = 1;
4631                 }
4632                 vlan_num = vsi->vlan_num;
4633         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
4634                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
4635                 vlan_num = 1;
4636
4637         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4638         if (mv_f == NULL) {
4639                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4640                 return I40E_ERR_NO_MEMORY;
4641         }
4642
4643         for (i = 0; i < vlan_num; i++) {
4644                 mv_f[i].filter_type = mac_filter->filter_type;
4645                 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
4646                                 ETH_ADDR_LEN);
4647         }
4648
4649         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4650                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
4651                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
4652                                         &mac_filter->mac_addr);
4653                 if (ret != I40E_SUCCESS)
4654                         goto DONE;
4655         }
4656
4657         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
4658         if (ret != I40E_SUCCESS)
4659                 goto DONE;
4660
4661         /* Add the mac addr to the mac list */
4662         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4663         if (f == NULL) {
4664                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4665                 ret = I40E_ERR_NO_MEMORY;
4666                 goto DONE;
4667         }
4668         (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
4669                         ETH_ADDR_LEN);
4670         f->mac_info.filter_type = mac_filter->filter_type;
4671         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4672         vsi->mac_num++;
4673
4674         ret = I40E_SUCCESS;
4675 DONE:
4676         rte_free(mv_f);
4677
4678         return ret;
4679 }
4680
4681 int
4682 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
4683 {
4684         struct i40e_mac_filter *f;
4685         struct i40e_macvlan_filter *mv_f;
4686         int i, vlan_num;
4687         enum rte_mac_filter_type filter_type;
4688         int ret = I40E_SUCCESS;
4689
4690         /* Can't find it, return an error */
4691         f = i40e_find_mac_filter(vsi, addr);
4692         if (f == NULL)
4693                 return I40E_ERR_PARAM;
4694
4695         vlan_num = vsi->vlan_num;
4696         filter_type = f->mac_info.filter_type;
4697         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4698                 filter_type == RTE_MACVLAN_HASH_MATCH) {
4699                 if (vlan_num == 0) {
4700                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
4701                         return I40E_ERR_PARAM;
4702                 }
4703         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
4704                         filter_type == RTE_MAC_HASH_MATCH)
4705                 vlan_num = 1;
4706
4707         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4708         if (mv_f == NULL) {
4709                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4710                 return I40E_ERR_NO_MEMORY;
4711         }
4712
4713         for (i = 0; i < vlan_num; i++) {
4714                 mv_f[i].filter_type = filter_type;
4715                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
4716                                 ETH_ADDR_LEN);
4717         }
4718         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4719                         filter_type == RTE_MACVLAN_HASH_MATCH) {
4720                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
4721                 if (ret != I40E_SUCCESS)
4722                         goto DONE;
4723         }
4724
4725         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
4726         if (ret != I40E_SUCCESS)
4727                 goto DONE;
4728
4729         /* Remove the mac addr from the mac list */
4730         TAILQ_REMOVE(&vsi->mac_list, f, next);
4731         rte_free(f);
4732         vsi->mac_num--;
4733
4734         ret = I40E_SUCCESS;
4735 DONE:
4736         rte_free(mv_f);
4737         return ret;
4738 }
4739
4740 /* Configure hash enable flags for RSS */
4741 uint64_t
4742 i40e_config_hena(uint64_t flags)
4743 {
4744         uint64_t hena = 0;
4745
4746         if (!flags)
4747                 return hena;
4748
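        /*
         * Map each enabled RSS offload flag to the bit of the matching
         * hardware packet classifier type (PCTYPE) in the 64-bit HENA value.
         */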
4749         if (flags & ETH_RSS_FRAG_IPV4)
4750                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
4751         if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
4752                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
4753         if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
4754                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
4755         if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
4756                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
4757         if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
4758                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
4759         if (flags & ETH_RSS_FRAG_IPV6)
4760                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
4761         if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
4762                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
4763         if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
4764                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
4765         if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
4766                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
4767         if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
4768                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
4769         if (flags & ETH_RSS_L2_PAYLOAD)
4770                 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
4771
4772         return hena;
4773 }
4774
4775 /* Parse the hash enable flags */
4776 uint64_t
4777 i40e_parse_hena(uint64_t flags)
4778 {
4779         uint64_t rss_hf = 0;
4780
4781         if (!flags)
4782                 return rss_hf;
4783         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
4784                 rss_hf |= ETH_RSS_FRAG_IPV4;
4785         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
4786                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
4787         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
4788                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
4789         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
4790                 rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
4791         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
4792                 rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
4793         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
4794                 rss_hf |= ETH_RSS_FRAG_IPV6;
4795         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
4796                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
4797         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
4798                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
4799         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
4800                 rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
4801         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
4802                 rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
4803         if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
4804                 rss_hf |= ETH_RSS_L2_PAYLOAD;
4805
4806         return rss_hf;
4807 }
4808
4809 /* Disable RSS */
4810 static void
4811 i40e_pf_disable_rss(struct i40e_pf *pf)
4812 {
4813         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4814         uint64_t hena;
4815
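        /*
         * The 64-bit HENA value is split across two 32-bit registers; read
         * both halves, clear only the RSS-related bits and write them back.
         */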
4816         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4817         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4818         hena &= ~I40E_RSS_HENA_ALL;
4819         I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4820         I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4821         I40E_WRITE_FLUSH(hw);
4822 }
4823
4824 static int
4825 i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
4826 {
4827         uint32_t *hash_key;
4828         uint8_t hash_key_len;
4829         uint64_t rss_hf;
4830         uint16_t i;
4831         uint64_t hena;
4832
4833         hash_key = (uint32_t *)(rss_conf->rss_key);
4834         hash_key_len = rss_conf->rss_key_len;
4835         if (hash_key != NULL && hash_key_len >=
4836                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
4837                 /* Fill in RSS hash key */
4838                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4839                         I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
4840         }
4841
4842         rss_hf = rss_conf->rss_hf;
4843         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4844         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4845         hena &= ~I40E_RSS_HENA_ALL;
4846         hena |= i40e_config_hena(rss_hf);
4847         I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4848         I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4849         I40E_WRITE_FLUSH(hw);
4850
4851         return 0;
4852 }
4853
4854 static int
4855 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
4856                          struct rte_eth_rss_conf *rss_conf)
4857 {
4858         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4859         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
4860         uint64_t hena;
4861
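        /*
         * RSS on/off is decided by mq_mode at device configure time; this
         * callback may only update the hash key and enabled flow types, so
         * an attempt to toggle RSS here is rejected with -EINVAL below.
         */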
4862         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4863         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4864         if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
4865                 if (rss_hf != 0) /* Enable RSS */
4866                         return -EINVAL;
4867                 return 0; /* Nothing to do */
4868         }
4869         /* RSS enabled */
4870         if (rss_hf == 0) /* Disable RSS */
4871                 return -EINVAL;
4872
4873         return i40e_hw_rss_hash_set(hw, rss_conf);
4874 }
4875
4876 static int
4877 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
4878                            struct rte_eth_rss_conf *rss_conf)
4879 {
4880         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4881         uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
4882         uint64_t hena;
4883         uint16_t i;
4884
4885         if (hash_key != NULL) {
4886                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4887                         hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
4888                 rss_conf->rss_key_len = i * sizeof(uint32_t);
4889         }
4890         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4891         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4892         rss_conf->rss_hf = i40e_parse_hena(hena);
4893
4894         return 0;
4895 }
4896
4897 static int
4898 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
4899 {
4900         switch (filter_type) {
4901         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
4902                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
4903                 break;
4904         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
4905                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
4906                 break;
4907         case RTE_TUNNEL_FILTER_IMAC_TENID:
4908                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
4909                 break;
4910         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
4911                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
4912                 break;
4913         case ETH_TUNNEL_FILTER_IMAC:
4914                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
4915                 break;
4916         default:
4917                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
4918                 return -EINVAL;
4919         }
4920
4921         return 0;
4922 }
4923
4924 static int
4925 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
4926                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
4927                         uint8_t add)
4928 {
4929         uint16_t ip_type;
4930         uint8_t tun_type = 0;
4931         int val, ret = 0;
4932         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4933         struct i40e_vsi *vsi = pf->main_vsi;
4934         struct i40e_aqc_add_remove_cloud_filters_element_data  *cld_filter;
4935         struct i40e_aqc_add_remove_cloud_filters_element_data  *pfilter;
4936
4937         cld_filter = rte_zmalloc("tunnel_filter",
4938                 sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
4939                 0);
4940
4941         if (NULL == cld_filter) {
4942                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
4943                 return -EINVAL;
4944         }
4945         pfilter = cld_filter;
4946
4947         (void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
4948                         sizeof(struct ether_addr));
4949         (void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
4950                         sizeof(struct ether_addr));
4951
4952         pfilter->inner_vlan = tunnel_filter->inner_vlan;
4953         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
4954                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
4955                 (void)rte_memcpy(&pfilter->ipaddr.v4.data,
4956                                 &tunnel_filter->ip_addr,
4957                                 sizeof(pfilter->ipaddr.v4.data));
4958         } else {
4959                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
4960                 (void)rte_memcpy(&pfilter->ipaddr.v6.data,
4961                                 &tunnel_filter->ip_addr,
4962                                 sizeof(pfilter->ipaddr.v6.data));
4963         }
4964
4965         /* check tunneled type */
4966         switch (tunnel_filter->tunnel_type) {
4967         case RTE_TUNNEL_TYPE_VXLAN:
4968                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
4969                 break;
4970         case RTE_TUNNEL_TYPE_NVGRE:
4971                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
4972                 break;
4973         default:
4974                 /* Other tunnel types are not supported. */
4975                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
4976                 rte_free(cld_filter);
4977                 return -EINVAL;
4978         }
4979
4980         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
4981                                                 &pfilter->flags);
4982         if (val < 0) {
4983                 rte_free(cld_filter);
4984                 return -EINVAL;
4985         }
4986
4987         pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
4988                 (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
4989         pfilter->tenant_id = tunnel_filter->tenant_id;
4990         pfilter->queue_number = tunnel_filter->queue_id;
4991
4992         if (add)
4993                 ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
4994         else
4995                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
4996                                                 cld_filter, 1);
4997
4998         rte_free(cld_filter);
4999         return ret;
5000 }
5001
5002 static int
5003 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
5004 {
5005         uint8_t i;
5006
5007         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
5008                 if (pf->vxlan_ports[i] == port)
5009                         return i;
5010         }
5011
5012         return -1;
5013 }
5014
5015 static int
5016 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
5017 {
5018         int  idx, ret;
5019         uint8_t filter_idx;
5020         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5021
5022         idx = i40e_get_vxlan_port_idx(pf, port);
5023
5024         /* Check if port already exists */
5025         if (idx >= 0) {
5026                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
5027                 return -EINVAL;
5028         }
5029
5030         /* Now check if there is space to add the new port */
5031         idx = i40e_get_vxlan_port_idx(pf, 0);
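        /*
         * Unused slots hold port 0, so searching for port 0 returns the
         * index of a free slot, or -1 if the table is full.
         */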
5032         if (idx < 0) {
5033                 PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached, "
5034                         "not adding port %d", port);
5035                 return -ENOSPC;
5036         }
5037
5038         ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
5039                                         &filter_idx, NULL);
5040         if (ret < 0) {
5041                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
5042                 return -1;
5043         }
5044
5045         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
5046                         port, filter_idx);
5047
5048         /* New port: add it and mark its index in the bitmap */
5049         pf->vxlan_ports[idx] = port;
5050         pf->vxlan_bitmap |= (1 << idx);
5051
5052         if (!(pf->flags & I40E_FLAG_VXLAN))
5053                 pf->flags |= I40E_FLAG_VXLAN;
5054
5055         return 0;
5056 }
5057
5058 static int
5059 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
5060 {
5061         int idx;
5062         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5063
5064         if (!(pf->flags & I40E_FLAG_VXLAN)) {
5065                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
5066                 return -EINVAL;
5067         }
5068
5069         idx = i40e_get_vxlan_port_idx(pf, port);
5070
5071         if (idx < 0) {
5072                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
5073                 return -EINVAL;
5074         }
5075
5076         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
5077                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
5078                 return -1;
5079         }
5080
5081         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
5082                         port, idx);
5083
5084         pf->vxlan_ports[idx] = 0;
5085         pf->vxlan_bitmap &= ~(1 << idx);
5086
5087         if (!pf->vxlan_bitmap)
5088                 pf->flags &= ~I40E_FLAG_VXLAN;
5089
5090         return 0;
5091 }
5092
5093 /* Add UDP tunneling port */
5094 static int
5095 i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
5096                         struct rte_eth_udp_tunnel *udp_tunnel)
5097 {
5098         int ret = 0;
5099         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5100
5101         if (udp_tunnel == NULL)
5102                 return -EINVAL;
5103
5104         switch (udp_tunnel->prot_type) {
5105         case RTE_TUNNEL_TYPE_VXLAN:
5106                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
5107                 break;
5108
5109         case RTE_TUNNEL_TYPE_GENEVE:
5110         case RTE_TUNNEL_TYPE_TEREDO:
5111                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
5112                 ret = -1;
5113                 break;
5114
5115         default:
5116                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
5117                 ret = -1;
5118                 break;
5119         }
5120
5121         return ret;
5122 }
5123
5124 /* Remove UDP tunneling port */
5125 static int
5126 i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
5127                         struct rte_eth_udp_tunnel *udp_tunnel)
5128 {
5129         int ret = 0;
5130         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5131
5132         if (udp_tunnel == NULL)
5133                 return -EINVAL;
5134
5135         switch (udp_tunnel->prot_type) {
5136         case RTE_TUNNEL_TYPE_VXLAN:
5137                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
5138                 break;
5139         case RTE_TUNNEL_TYPE_GENEVE:
5140         case RTE_TUNNEL_TYPE_TEREDO:
5141                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
5142                 ret = -1;
5143                 break;
5144         default:
5145                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
5146                 ret = -1;
5147                 break;
5148         }
5149
5150         return ret;
5151 }
5152
5153 /* Calculate the maximum number of contiguous PF queues that are configured */
5154 static int
5155 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
5156 {
5157         struct rte_eth_dev_data *data = pf->dev_data;
5158         int i, num;
5159         struct i40e_rx_queue *rxq;
5160
5161         num = 0;
5162         for (i = 0; i < pf->lan_nb_qps; i++) {
5163                 rxq = data->rx_queues[i];
5164                 if (rxq && rxq->q_set)
5165                         num++;
5166                 else
5167                         break;
5168         }
5169
5170         return num;
5171 }
5172
5173 /* Configure RSS */
5174 static int
5175 i40e_pf_config_rss(struct i40e_pf *pf)
5176 {
5177         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5178         struct rte_eth_rss_conf rss_conf;
5179         uint32_t i, lut = 0;
5180         uint16_t j, num;
5181
5182         /*
5183          * If both VMDQ and RSS are enabled, not all PF queues are configured.
5184          * It's necessary to calculate the actual number of PF queues in use.
5185          */
5186         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
5187                 num = i40e_pf_calc_configured_queues_num(pf);
5188                 num = i40e_align_floor(num);
5189         } else
5190                 num = i40e_align_floor(pf->dev_data->nb_rx_queues);
5191
5192         PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
5193                         num);
5194
5195         if (num == 0) {
5196                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
5197                 return -ENOTSUP;
5198         }
5199
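        /*
         * Fill the RSS lookup table with queue indices that wrap modulo
         * 'num'. Each 32-bit HLUT register packs four 8-bit entries, so
         * accumulate four entries in 'lut' and flush on every fourth
         * iteration.
         */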
5200         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
5201                 if (j == num)
5202                         j = 0;
5203                 lut = (lut << 8) | (j & ((0x1 <<
5204                         hw->func_caps.rss_table_entry_width) - 1));
5205                 if ((i & 3) == 3)
5206                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
5207         }
5208
5209         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
5210         if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
5211                 i40e_pf_disable_rss(pf);
5212                 return 0;
5213         }
5214         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
5215                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
5216                 /* Random default keys */
5217                 static uint32_t rss_key_default[] = {0x6b793944,
5218                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
5219                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
5220                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
5221
5222                 rss_conf.rss_key = (uint8_t *)rss_key_default;
5223                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
5224                                                         sizeof(uint32_t);
5225         }
5226
5227         return i40e_hw_rss_hash_set(hw, &rss_conf);
5228 }
5229
5230 static int
5231 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
5232                         struct rte_eth_tunnel_filter_conf *filter)
5233 {
5234         if (pf == NULL || filter == NULL) {
5235                 PMD_DRV_LOG(ERR, "Invalid parameter");
5236                 return -EINVAL;
5237         }
5238
5239         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
5240                 PMD_DRV_LOG(ERR, "Invalid queue ID");
5241                 return -EINVAL;
5242         }
5243
5244         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
5245                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
5246                 return -EINVAL;
5247         }
5248
5249         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
5250                 (is_zero_ether_addr(filter->outer_mac))) {
5251                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
5252                 return -EINVAL;
5253         }
5254
5255         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
5256                 (is_zero_ether_addr(filter->inner_mac))) {
5257                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
5258                 return -EINVAL;
5259         }
5260
5261         return 0;
5262 }
5263
5264 static int
5265 i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
5266                         void *arg)
5267 {
5268         struct rte_eth_tunnel_filter_conf *filter;
5269         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5270         int ret = I40E_SUCCESS;
5271
5272         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
5273
5274         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
5275                 return I40E_ERR_PARAM;
5276
5277         switch (filter_op) {
5278         case RTE_ETH_FILTER_NOP:
5279                 if (!(pf->flags & I40E_FLAG_VXLAN))
5280                         ret = I40E_NOT_SUPPORTED;
                break;
5281         case RTE_ETH_FILTER_ADD:
5282                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
5283                 break;
5284         case RTE_ETH_FILTER_DELETE:
5285                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
5286                 break;
5287         default:
5288                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
5289                 ret = I40E_ERR_PARAM;
5290                 break;
5291         }
5292
5293         return ret;
5294 }
5295
5296 static int
5297 i40e_pf_config_mq_rx(struct i40e_pf *pf)
5298 {
5299         int ret = 0;
5300         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
5301
5302         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
5303                 PMD_INIT_LOG(ERR, "i40e doesn't support DCB yet");
5304                 return -ENOTSUP;
5305         }
5306
5307         /* RSS setup */
5308         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
5309                 ret = i40e_pf_config_rss(pf);
5310         else
5311                 i40e_pf_disable_rss(pf);
5312
5313         return ret;
5314 }
5315
5316 /* Get the symmetric hash enable configurations per port */
5317 static void
5318 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
5319 {
5320         uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
5321
5322         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
5323 }
5324
5325 /* Set the symmetric hash enable configurations per port */
5326 static void
5327 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
5328 {
5329         uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
5330
5331         if (enable > 0) {
5332                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
5333                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
5334                                                         "been enabled");
5335                         return;
5336                 }
5337                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
5338         } else {
5339                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
5340                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
5341                                                         "been disabled");
5342                         return;
5343                 }
5344                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
5345         }
5346         I40E_WRITE_REG(hw, I40E_PRTQF_CTL_0, reg);
5347         I40E_WRITE_FLUSH(hw);
5348 }
5349
5350 /*
5351  * Get global configurations of hash function type and symmetric hash enable
5352  * per flow type (pctype). Note that global configuration means it affects all
5353  * the ports on the same NIC.
5354  */
5355 static int
5356 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
5357                                    struct rte_eth_hash_global_conf *g_cfg)
5358 {
5359         uint32_t reg, mask = I40E_FLOW_TYPES;
5360         uint16_t i;
5361         enum i40e_filter_pctype pctype;
5362
5363         memset(g_cfg, 0, sizeof(*g_cfg));
5364         reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
5365         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
5366                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
5367         else
5368                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
5369         PMD_DRV_LOG(DEBUG, "Hash function is %s",
5370                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
5371
5372         for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
5373                 if (!(mask & (1UL << i)))
5374                         continue;
5375                 mask &= ~(1UL << i);
5376                 /* A set bit indicates the corresponding flow type is supported */
5377                 g_cfg->valid_bit_mask[0] |= (1UL << i);
5378                 pctype = i40e_flowtype_to_pctype(i);
5379                 reg = I40E_READ_REG(hw, I40E_GLQF_HSYM(pctype));
5380                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
5381                         g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
5382         }
5383
5384         return 0;
5385 }
5386
5387 static int
5388 i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
5389 {
5390         uint32_t i;
5391         uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
5392
5393         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
5394                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
5395                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
5396                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
5397                                                 g_cfg->hash_func);
5398                 return -EINVAL;
5399         }
5400
5401         /*
5402          * As i40e supports fewer than 32 flow types, only the first 32 bits
5403          * need to be checked.
5404          */
5405         mask0 = g_cfg->valid_bit_mask[0];
5406         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
5407                 if (i == 0) {
5408                         /* Check if any unsupported flow type is configured */
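                        /* (mask0 | i40e_mask) ^ i40e_mask is non-zero
                         * iff mask0 sets a bit outside of i40e_mask */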
5409                         if ((mask0 | i40e_mask) ^ i40e_mask)
5410                                 goto mask_err;
5411                 } else {
5412                         if (g_cfg->valid_bit_mask[i])
5413                                 goto mask_err;
5414                 }
5415         }
5416
5417         return 0;
5418
5419 mask_err:
5420         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
5421
5422         return -EINVAL;
5423 }
5424
5425 /*
5426  * Set global configurations of hash function type and symmetric hash enable
5427  * per flow type (pctype). Note that any modification of the global
5428  * configuration affects all the ports on the same NIC.
5429  */
5430 static int
5431 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
5432                                    struct rte_eth_hash_global_conf *g_cfg)
5433 {
5434         int ret;
5435         uint16_t i;
5436         uint32_t reg;
5437         uint32_t mask0 = g_cfg->valid_bit_mask[0];
5438         enum i40e_filter_pctype pctype;
5439
5440         /* Check the input parameters */
5441         ret = i40e_hash_global_config_check(g_cfg);
5442         if (ret < 0)
5443                 return ret;
5444
5445         for (i = 0; mask0 && i < UINT32_BIT; i++) {
5446                 if (!(mask0 & (1UL << i)))
5447                         continue;
5448                 mask0 &= ~(1UL << i);
5449                 pctype = i40e_flowtype_to_pctype(i);
5450                 reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
5451                                 I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
5452                 I40E_WRITE_REG(hw, I40E_GLQF_HSYM(pctype), reg);
5453         }
5454
5455         reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
5456         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
5457                 /* Toeplitz */
5458                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
5459                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
5460                                                                 "Toeplitz");
5461                         goto out;
5462                 }
5463                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
5464         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
5465                 /* Simple XOR */
5466                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
5467                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
5468                                                         "Simple XOR");
5469                         goto out;
5470                 }
5471                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
5472         } else
5473                 /* Use the default, and keep it as it is */
5474                 goto out;
5475
5476         I40E_WRITE_REG(hw, I40E_GLQF_CTL, reg);
5477
5478 out:
5479         I40E_WRITE_FLUSH(hw);
5480
5481         return 0;
5482 }
5483
5484 static int
5485 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
5486 {
5487         int ret = 0;
5488
5489         if (!hw || !info) {
5490                 PMD_DRV_LOG(ERR, "Invalid pointer");
5491                 return -EFAULT;
5492         }
5493
5494         switch (info->info_type) {
5495         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
5496                 i40e_get_symmetric_hash_enable_per_port(hw,
5497                                         &(info->info.enable));
5498                 break;
5499         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
5500                 ret = i40e_get_hash_filter_global_config(hw,
5501                                 &(info->info.global_conf));
5502                 break;
5503         default:
5504                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
5505                                                         info->info_type);
5506                 ret = -EINVAL;
5507                 break;
5508         }
5509
5510         return ret;
5511 }
5512
5513 static int
5514 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
5515 {
5516         int ret = 0;
5517
5518         if (!hw || !info) {
5519                 PMD_DRV_LOG(ERR, "Invalid pointer");
5520                 return -EFAULT;
5521         }
5522
5523         switch (info->info_type) {
5524         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
5525                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
5526                 break;
5527         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
5528                 ret = i40e_set_hash_filter_global_config(hw,
5529                                 &(info->info.global_conf));
5530                 break;
5531         default:
5532                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
5533                                                         info->info_type);
5534                 ret = -EINVAL;
5535                 break;
5536         }
5537
5538         return ret;
5539 }
5540
5541 /* Operations for hash function */
5542 static int
5543 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
5544                       enum rte_filter_op filter_op,
5545                       void *arg)
5546 {
5547         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5548         int ret = 0;
5549
5550         switch (filter_op) {
5551         case RTE_ETH_FILTER_NOP:
5552                 break;
5553         case RTE_ETH_FILTER_GET:
5554                 ret = i40e_hash_filter_get(hw,
5555                         (struct rte_eth_hash_filter_info *)arg);
5556                 break;
5557         case RTE_ETH_FILTER_SET:
5558                 ret = i40e_hash_filter_set(hw,
5559                         (struct rte_eth_hash_filter_info *)arg);
5560                 break;
5561         default:
5562                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
5563                                                                 filter_op);
5564                 ret = -ENOTSUP;
5565                 break;
5566         }
5567
5568         return ret;
5569 }
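
/*
 * A minimal usage sketch, for illustration only: an application reaches
 * the hash filter operations above through the generic filter API.
 * The port_id (0) and the chosen flow type are illustrative:
 *
 *   struct rte_eth_hash_filter_info info;
 *
 *   // Enable symmetric hashing on the port
 *   memset(&info, 0, sizeof(info));
 *   info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
 *   info.info.enable = 1;
 *   rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_HASH,
 *                           RTE_ETH_FILTER_SET, &info);
 *
 *   // Select Toeplitz and symmetric hashing for IPv4/TCP globally
 *   memset(&info, 0, sizeof(info));
 *   info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
 *   info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
 *   info.info.global_conf.valid_bit_mask[0] =
 *           1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 *   info.info.global_conf.sym_hash_enable_mask[0] =
 *           1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 *   rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_HASH,
 *                           RTE_ETH_FILTER_SET, &info);
 */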
5570
5571 /*
5572  * Configure an ethertype filter, which can direct packets by filtering
5573  * on MAC address and ether_type, or on ether_type alone
5574  */
5575 static int
5576 i40e_ethertype_filter_set(struct i40e_pf *pf,
5577                         struct rte_eth_ethertype_filter *filter,
5578                         bool add)
5579 {
5580         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5581         struct i40e_control_filter_stats stats;
5582         uint16_t flags = 0;
5583         int ret;
5584
5585         if (filter->queue >= pf->dev_data->nb_rx_queues) {
5586                 PMD_DRV_LOG(ERR, "Invalid queue ID");
5587                 return -EINVAL;
5588         }
5589         if (filter->ether_type == ETHER_TYPE_IPv4 ||
5590                 filter->ether_type == ETHER_TYPE_IPv6) {
5591                 PMD_DRV_LOG(ERR, "unsupported ether_type (0x%04x) in"
5592                         " control packet filter.", filter->ether_type);
5593                 return -EINVAL;
5594         }
5595         if (filter->ether_type == ETHER_TYPE_VLAN)
5596                 PMD_DRV_LOG(WARNING, "filtering on vlan ether_type in the"
5597                         " first tag is not supported.");
5598
5599         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
5600                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
5601         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
5602                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
5603         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
5604
5605         memset(&stats, 0, sizeof(stats));
5606         ret = i40e_aq_add_rem_control_packet_filter(hw,
5607                         filter->mac_addr.addr_bytes,
5608                         filter->ether_type, flags,
5609                         pf->main_vsi->seid,
5610                         filter->queue, add, &stats, NULL);
5611
5612         PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
5613                          " mac_etype_used = %u, etype_used = %u,"
5614                          " mac_etype_free = %u, etype_free = %u",
5615                          ret, stats.mac_etype_used, stats.etype_used,
5616                          stats.mac_etype_free, stats.etype_free);
5617         if (ret < 0)
5618                 return -ENOSYS;
5619         return 0;
5620 }
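
/*
 * An illustrative sketch of driving the ethertype filter above from an
 * application; port_id 0, the ether_type and the queue are example values:
 *
 *   struct rte_eth_ethertype_filter filter;
 *
 *   memset(&filter, 0, sizeof(filter));
 *   filter.ether_type = 0x88F7;  // e.g. PTP over Ethernet
 *   filter.flags = 0;            // no RTE_ETHTYPE_FLAGS_MAC: match type only
 *   filter.queue = 1;            // steer matching packets to RX queue 1
 *   rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_ETHERTYPE,
 *                           RTE_ETH_FILTER_ADD, &filter);
 */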
5621
5622 /*
5623  * Handle operations for ethertype filter.
5624  */
5625 static int
5626 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
5627                                 enum rte_filter_op filter_op,
5628                                 void *arg)
5629 {
5630         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5631         int ret = 0;
5632
5633         if (filter_op == RTE_ETH_FILTER_NOP)
5634                 return ret;
5635
5636         if (arg == NULL) {
5637                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
5638                             filter_op);
5639                 return -EINVAL;
5640         }
5641
5642         switch (filter_op) {
5643         case RTE_ETH_FILTER_ADD:
5644                 ret = i40e_ethertype_filter_set(pf,
5645                         (struct rte_eth_ethertype_filter *)arg,
5646                         TRUE);
5647                 break;
5648         case RTE_ETH_FILTER_DELETE:
5649                 ret = i40e_ethertype_filter_set(pf,
5650                         (struct rte_eth_ethertype_filter *)arg,
5651                         FALSE);
5652                 break;
5653         default:
5654                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
5655                 ret = -ENOSYS;
5656                 break;
5657         }
5658         return ret;
5659 }
5660
5661 static int
5662 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
5663                      enum rte_filter_type filter_type,
5664                      enum rte_filter_op filter_op,
5665                      void *arg)
5666 {
5667         int ret = 0;
5668
5669         if (dev == NULL)
5670                 return -EINVAL;
5671
5672         switch (filter_type) {
5673         case RTE_ETH_FILTER_HASH:
5674                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
5675                 break;
5676         case RTE_ETH_FILTER_MACVLAN:
5677                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
5678                 break;
5679         case RTE_ETH_FILTER_ETHERTYPE:
5680                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
5681                 break;
5682         case RTE_ETH_FILTER_TUNNEL:
5683                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
5684                 break;
5685         case RTE_ETH_FILTER_FDIR:
5686                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
5687                 break;
5688         default:
5689                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5690                                                         filter_type);
5691                 ret = -EINVAL;
5692                 break;
5693         }
5694
5695         return ret;
5696 }
5697
5698 /*
5699  * As some registers are not reset unless a global hardware reset is
5700  * performed, hardware initialization is needed to put those registers
5701  * into an expected initial state.
5702  */
5703 static void
5704 i40e_hw_init(struct i40e_hw *hw)
5705 {
5706         /* clear the PF Queue Filter control register */
5707         I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0);
5708
5709         /* Disable symmetric hash per port */
5710         i40e_set_symmetric_hash_enable_per_port(hw, 0);
5711 }
5712
5713 enum i40e_filter_pctype
5714 i40e_flowtype_to_pctype(uint16_t flow_type)
5715 {
5716         static const enum i40e_filter_pctype pctype_table[] = {
5717                 [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
5718                 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
5719                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
5720                 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
5721                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
5722                 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
5723                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
5724                 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
5725                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
5726                 [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
5727                 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
5728                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
5729                 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
5730                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
5731                 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
5732                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
5733                 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
5734                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
5735                 [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
5736         };
5737
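        /* Designated initializers leave unlisted entries zeroed, so an
         * unsupported flow_type maps to 0, which is not a valid pctype;
         * callers are expected to pass a flow type covered by
         * I40E_FLOW_TYPES. */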
5738         return pctype_table[flow_type];
5739 }
5740
5741 uint16_t
5742 i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
5743 {
5744         static const uint16_t flowtype_table[] = {
5745                 [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
5746                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
5747                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
5748                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
5749                         RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
5750                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
5751                         RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
5752                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
5753                         RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
5754                 [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
5755                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
5756                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
5757                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
5758                         RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
5759                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
5760                         RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
5761                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
5762                         RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
5763                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
5764         };
5765
5766         return flowtype_table[pctype];
5767 }
5768
5769 /*
5770  * On X710, performance is far below expectations on recent firmware
5771  * versions. On XL710 the same is true if promiscuous mode is disabled,
5772  * or if promiscuous mode is enabled and the port MAC address equals the
5773  * packet destination MAC address. The fix for this issue may not be
5774  * integrated in the following firmware version, so a workaround in the
5775  * software driver is needed. It modifies the initial values of 3
5776  * internal-only registers on both X710 and XL710. Note that the values
5777  * for X710 and XL710 may differ, and the workaround can be removed once
5778  * the issue is fixed in firmware.
5779  */
5780
5781 /* For both X710 and XL710 */
5782 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
5783 #define I40E_GL_SWR_PRI_JOIN_MAP_0       0x26CE00
5784
5785 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
5786 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
5787
5788 /* For X710 */
5789 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
5790 /* For XL710 */
5791 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
5792 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
5793
5794 static void
5795 i40e_configure_registers(struct i40e_hw *hw)
5796 {
5797         static struct {
5798                 uint32_t addr;
5799                 uint64_t val;
5800         } reg_table[] = {
5801                 {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
5802                 {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
5803                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
5804         };
5805         uint64_t reg;
5806         uint32_t i;
5807         int ret;
5808
5809         for (i = 0; i < RTE_DIM(reg_table); i++) {
5810                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
5811                         if (i40e_is_40G_device(hw->device_id)) /* For XL710 */
5812                                 reg_table[i].val =
5813                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
5814                         else /* For X710 */
5815                                 reg_table[i].val =
5816                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
5817                 }
5818
5819                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
5820                                                         &reg, NULL);
5821                 if (ret < 0) {
5822                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
5823                                                         reg_table[i].addr);
5824                         break;
5825                 }
5826                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
5827                                                 reg_table[i].addr, reg);
5828                 if (reg == reg_table[i].val)
5829                         continue;
5830
5831                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
5832                                                 reg_table[i].val, NULL);
5833                 if (ret < 0) {
5834                         PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
5835                                 "address of 0x%"PRIx32, reg_table[i].val,
5836                                                         reg_table[i].addr);
5837                         break;
5838                 }
5839                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
5840                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
5841         }
5842 }
5843
5844 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
5845 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
5846 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
5847 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
5848 static int
5849 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
5850 {
5851         uint32_t reg;
5852         int ret;
5853
5854         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
5855                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
5856                 return -EINVAL;
5857         }
5858
5859         /* Configure for double VLAN RX stripping */
5860         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
5861         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
5862                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
5863                 ret = i40e_aq_debug_write_register(hw,
5864                                                    I40E_VSI_TSR(vsi->vsi_id),
5865                                                    reg, NULL);
5866                 if (ret < 0) {
5867                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
5868                                     vsi->vsi_id);
5869                         return I40E_ERR_CONFIG;
5870                 }
5871         }
5872
5873         /* Configure for double VLAN TX insertion */
5874         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
5875         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
5876                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
5877                 ret = i40e_aq_debug_write_register(hw,
5878                                                    I40E_VSI_L2TAGSTXVALID(
5879                                                    vsi->vsi_id), reg, NULL);
5880                 if (ret < 0) {
5881                         PMD_DRV_LOG(ERR, "Failed to update "
5882                                 "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
5883                         return I40E_ERR_CONFIG;
5884                 }
5885         }
5886
5887         return 0;
5888 }
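
/*
 * A hedged usage note: from the application side, extended (QinQ) VLAN
 * handling is requested through the generic VLAN offload API, assuming
 * the driver routes the extend flag to the QinQ configuration above;
 * port_id 0 is illustrative:
 *
 *   rte_eth_dev_set_vlan_offload(0, ETH_VLAN_EXTEND_OFFLOAD);
 */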
5889
5890 /**
5891  * i40e_aq_add_mirror_rule
5892  * @hw: pointer to the hardware structure
5893  * @seid: VEB seid to add mirror rule to
5894  * @dst_id: destination vsi seid
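 * @rule_type: rule type (I40E_AQC_MIRROR_RULE_TYPE_*)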
5895  * @entries: Buffer which contains the entities to be mirrored
5896  * @count: number of entities contained in the buffer
5897  * @rule_id: the rule_id of the rule to be added
5898  *
5899  * Add a mirror rule for a given veb.
5900  *
5901  **/
5902 static enum i40e_status_code
5903 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
5904                         uint16_t seid, uint16_t dst_id,
5905                         uint16_t rule_type, uint16_t *entries,
5906                         uint16_t count, uint16_t *rule_id)
5907 {
5908         struct i40e_aq_desc desc;
5909         struct i40e_aqc_add_delete_mirror_rule cmd;
5910         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
5911                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
5912                 &desc.params.raw;
5913         uint16_t buff_len;
5914         enum i40e_status_code status;
5915
5916         i40e_fill_default_direct_cmd_desc(&desc,
5917                                           i40e_aqc_opc_add_mirror_rule);
5918         memset(&cmd, 0, sizeof(cmd));
5919
5920         buff_len = sizeof(uint16_t) * count;
5921         desc.datalen = rte_cpu_to_le_16(buff_len);
5922         if (buff_len > 0)
5923                 desc.flags |= rte_cpu_to_le_16(
5924                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5925         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
5926                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
5927         cmd.num_entries = rte_cpu_to_le_16(count);
5928         cmd.seid = rte_cpu_to_le_16(seid);
5929         cmd.destination = rte_cpu_to_le_16(dst_id);
5930
5931         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
5932         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
5933         PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
5934                          " rule_id = %u,"
5935                          " mirror_rules_used = %u, mirror_rules_free = %u",
5936                          hw->aq.asq_last_status, resp->rule_id,
5937                          resp->mirror_rules_used, resp->mirror_rules_free);
5938         *rule_id = rte_le_to_cpu_16(resp->rule_id);
5939
5940         return status;
5941 }
5942
5943 /**
5944  * i40e_aq_del_mirror_rule
5945  * @hw: pointer to the hardware structure
5946  * @seid: VEB seid to delete mirror rule from
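 * @rule_type: rule type (I40E_AQC_MIRROR_RULE_TYPE_*)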
5947  * @entries: Buffer which contains the entities to be mirrored
5948  * @count: number of entities contained in the buffer
5949  * @rule_id: the rule_id of the rule to be deleted
5950  *
5951  * Delete a mirror rule for a given veb.
5952  *
5953  **/
5954 static enum i40e_status_code
5955 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
5956                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
5957                 uint16_t count, uint16_t rule_id)
5958 {
5959         struct i40e_aq_desc desc;
5960         struct i40e_aqc_add_delete_mirror_rule cmd;
5961         uint16_t buff_len = 0;
5962         enum i40e_status_code status;
5963         void *buff = NULL;
5964
5965         i40e_fill_default_direct_cmd_desc(&desc,
5966                                           i40e_aqc_opc_delete_mirror_rule);
5967         memset(&cmd, 0, sizeof(cmd));
5968         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
5969                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
5970                                                           I40E_AQ_FLAG_RD));
5971                 cmd.num_entries = count;
5972                 buff_len = sizeof(uint16_t) * count;
5973                 desc.datalen = rte_cpu_to_le_16(buff_len);
5974                 buff = (void *)entries;
5975         } else
5976                 /* rule_id is filled in the destination field for rule deletion */
5977                 cmd.destination = rte_cpu_to_le_16(rule_id);
5978
5979         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
5980                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
5981         cmd.seid = rte_cpu_to_le_16(seid);
5982
5983         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
5984         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
5985
5986         return status;
5987 }
5988
5989 /**
5990  * i40e_mirror_rule_set
5991  * @dev: pointer to the ethernet device structure
5992  * @mirror_conf: mirror rule info
5993  * @sw_id: mirror rule's sw_id
5994  * @on: enable/disable
5995  *
5996  * set a mirror rule.
5997  *
5998  **/
5999 static int
6000 i40e_mirror_rule_set(struct rte_eth_dev *dev,
6001                         struct rte_eth_mirror_conf *mirror_conf,
6002                         uint8_t sw_id, uint8_t on)
6003 {
6004         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6005         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6006         struct i40e_mirror_rule *it, *mirr_rule = NULL;
6007         struct i40e_mirror_rule *parent = NULL;
6008         uint16_t seid, dst_seid, rule_id;
6009         uint16_t i, j = 0;
6010         int ret;
6011
6012         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
6013
6014         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
6015                 PMD_DRV_LOG(ERR, "mirror rule cannot be configured"
6016                         " without veb or vfs.");
6017                 return -ENOSYS;
6018         }
6019         if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
6020                 PMD_DRV_LOG(ERR, "mirror table is full.");
6021                 return -ENOSPC;
6022         }
6023         if (mirror_conf->dst_pool > pf->vf_num) {
6024                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
6025                                  mirror_conf->dst_pool);
6026                 return -EINVAL;
6027         }
6028
6029         seid = pf->main_vsi->veb->seid;
6030
6031         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
6032                 if (sw_id <= it->index) {
6033                         mirr_rule = it;
6034                         break;
6035                 }
6036                 parent = it;
6037         }
6038         if (mirr_rule && sw_id == mirr_rule->index) {
6039                 if (on) {
6040                         PMD_DRV_LOG(ERR, "mirror rule exists.");
6041                         return -EEXIST;
6042                 } else {
6043                         ret = i40e_aq_del_mirror_rule(hw, seid,
6044                                         mirr_rule->rule_type,
6045                                         mirr_rule->entries,
6046                                         mirr_rule->num_entries, mirr_rule->id);
6047                         if (ret < 0) {
6048                                 PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
6049                                                    " ret = %d, aq_err = %d.",
6050                                                    ret, hw->aq.asq_last_status);
6051                                 return -ENOSYS;
6052                         }
6053                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
6054                         rte_free(mirr_rule);
6055                         pf->nb_mirror_rule--;
6056                         return 0;
6057                 }
6058         } else if (!on) {
6059                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
6060                 return -ENOENT;
6061         }
6062
6063         mirr_rule = rte_zmalloc("i40e_mirror_rule",
6064                                 sizeof(struct i40e_mirror_rule), 0);
6065         if (!mirr_rule) {
6066                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6067                 return I40E_ERR_NO_MEMORY;
6068         }
6069         switch (mirror_conf->rule_type) {
6070         case ETH_MIRROR_VLAN:
6071                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
6072                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
6073                                 mirr_rule->entries[j] =
6074                                         mirror_conf->vlan.vlan_id[i];
6075                                 j++;
6076                         }
6077                 }
6078                 if (j == 0) {
6079                         PMD_DRV_LOG(ERR, "vlan is not specified.");
6080                         rte_free(mirr_rule);
6081                         return -EINVAL;
6082                 }
6083                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
6084                 break;
6085         case ETH_MIRROR_VIRTUAL_POOL_UP:
6086         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
6087                 /* check if the specified pool bit is out of range */
6088                 if (mirror_conf->pool_mask >= (uint64_t)(1ULL << (pf->vf_num + 1))) {
6089                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
6090                         rte_free(mirr_rule);
6091                         return -EINVAL;
6092                 }
6093                 for (i = 0, j = 0; i < pf->vf_num; i++) {
6094                         if (mirror_conf->pool_mask & (1ULL << i)) {
6095                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
6096                                 j++;
6097                         }
6098                 }
6099                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
6100                         /* add pf vsi to entries */
6101                         mirr_rule->entries[j] = pf->main_vsi_seid;
6102                         j++;
6103                 }
6104                 if (j == 0) {
6105                         PMD_DRV_LOG(ERR, "pool is not specified.");
6106                         rte_free(mirr_rule);
6107                         return -EINVAL;
6108                 }
6109                 /* egress and ingress in AQ commands refer to the switch side, not the port */
6110                 mirr_rule->rule_type =
6111                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
6112                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
6113                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
6114                 break;
6115         case ETH_MIRROR_UPLINK_PORT:
6116                 /* egress and ingress in AQ commands refer to the switch side, not the port */
6117                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
6118                 break;
6119         case ETH_MIRROR_DOWNLINK_PORT:
6120                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
6121                 break;
6122         default:
6123                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
6124                         mirror_conf->rule_type);
6125                 rte_free(mirr_rule);
6126                 return -EINVAL;
6127         }
6128
6129         /* If the dst_pool is equal to vf_num, consider it as PF */
6130         if (mirror_conf->dst_pool == pf->vf_num)
6131                 dst_seid = pf->main_vsi_seid;
6132         else
6133                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
6134
6135         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
6136                                       mirr_rule->rule_type, mirr_rule->entries,
6137                                       j, &rule_id);
6138         if (ret < 0) {
6139                 PMD_DRV_LOG(ERR, "failed to add mirror rule:"
6140                                    " ret = %d, aq_err = %d.",
6141                                    ret, hw->aq.asq_last_status);
6142                 rte_free(mirr_rule);
6143                 return -ENOSYS;
6144         }
6145
6146         mirr_rule->index = sw_id;
6147         mirr_rule->num_entries = j;
6148         mirr_rule->id = rule_id;
6149         mirr_rule->dst_vsi_seid = dst_seid;
6150
6151         if (parent)
6152                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
6153         else
6154                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
6155
6156         pf->nb_mirror_rule++;
6157         return 0;
6158 }
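
/*
 * For illustration, a sketch of how an application would install such a
 * mirror rule via the ethdev API; port_id 0, rule id 0, VLAN 100 and
 * destination pool 0 are example values:
 *
 *   struct rte_eth_mirror_conf conf;
 *
 *   memset(&conf, 0, sizeof(conf));
 *   conf.rule_type = ETH_MIRROR_VLAN;
 *   conf.vlan.vlan_mask = 1ULL << 0;  // vlan_id[0] is valid
 *   conf.vlan.vlan_id[0] = 100;       // mirror traffic of VLAN 100
 *   conf.dst_pool = 0;                // mirror into VF pool 0
 *   rte_eth_mirror_rule_set(0, &conf, 0, 1);  // rule id 0, on
 */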
6159
6160 /**
6161  * i40e_mirror_rule_reset
6162  * @dev: pointer to the device
6163  * @sw_id: mirror rule's sw_id
6164  *
6165  * reset a mirror rule.
6166  *
6167  **/
6168 static int
6169 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
6170 {
6171         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6172         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6173         struct i40e_mirror_rule *it, *mirr_rule = NULL;
6174         uint16_t seid;
6175         int ret;
6176
6177         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
6178
6179         seid = pf->main_vsi->veb->seid;
6180
6181         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
6182                 if (sw_id == it->index) {
6183                         mirr_rule = it;
6184                         break;
6185                 }
6186         }
6187         if (mirr_rule) {
6188                 ret = i40e_aq_del_mirror_rule(hw, seid,
6189                                 mirr_rule->rule_type,
6190                                 mirr_rule->entries,
6191                                 mirr_rule->num_entries, mirr_rule->id);
6192                 if (ret < 0) {
6193                         PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
6194                                            " status = %d, aq_err = %d.",
6195                                            ret, hw->aq.asq_last_status);
6196                         return -ENOSYS;
6197                 }
6198                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
6199                 rte_free(mirr_rule);
6200                 pf->nb_mirror_rule--;
6201         } else {
6202                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
6203                 return -ENOENT;
6204         }
6205         return 0;
6206 }
6207
6208 static int
6209 i40e_timesync_enable(struct rte_eth_dev *dev)
6210 {
6211         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6212         struct rte_eth_link *link = &dev->data->dev_link;
6213         uint32_t tsync_ctl_l;
6214         uint32_t tsync_ctl_h;
6215         uint32_t tsync_inc_l;
6216         uint32_t tsync_inc_h;
6217
6218         switch (link->link_speed) {
6219         case ETH_LINK_SPEED_40G:
6220                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
6221                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
6222                 break;
6223         case ETH_LINK_SPEED_10G:
6224                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
6225                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
6226                 break;
6227         case ETH_LINK_SPEED_1000:
6228                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
6229                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
6230                 break;
6231         default:
6232                 tsync_inc_l = 0x0;
6233                 tsync_inc_h = 0x0;
6234         }
6235
6236         /* Clear timesync registers. */
6237         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
6238         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
6239         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(0));
6240         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(1));
6241         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(2));
6242         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(3));
6244
6245         /* Set the timesync increment value. */
6246         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
6247         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
6248
6249         /* Enable timestamping of PTP packets. */
6250         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
6251         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
6252
6253         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
6254         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
6255         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
6256
6257         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
6258         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
6259
6260         return 0;
6261 }
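
/*
 * An illustrative sketch of enabling the IEEE 1588 support above from an
 * application; port_id 0 and RX timestamp index 0 are example values:
 *
 *   struct timespec ts;
 *
 *   rte_eth_timesync_enable(0);
 *   // ... once an RX descriptor reports PKT_RX_IEEE1588_TMST ...
 *   if (rte_eth_timesync_read_rx_timestamp(0, &ts, 0) == 0)
 *           printf("rx timestamp: %ju\n", (uintmax_t)ts.tv_sec);
 */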
6262
6263 static int
6264 i40e_timesync_disable(struct rte_eth_dev *dev)
6265 {
6266         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6267         uint32_t tsync_ctl_l;
6268         uint32_t tsync_ctl_h;
6269
6270         /* Disable timestamping of transmitted PTP packets. */
6271         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
6272         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
6273
6274         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
6275         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
6276
6277         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
6278         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
6279
6280         /* Set the timesync increment value. */
6281         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
6282         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
6283
6284         return 0;
6285 }
6286
6287 static int
6288 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
6289                                 struct timespec *timestamp, uint32_t flags)
6290 {
6291         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6292         uint32_t sync_status;
6293         uint32_t rx_stmpl;
6294         uint32_t rx_stmph;
6295         uint32_t index = flags & 0x03;
6296
6297         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
6298         if ((sync_status & (1 << index)) == 0)
6299                 return -EINVAL;
6300
6301         rx_stmpl = I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
6302         rx_stmph = I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index));
6303
6304         timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
6305         timestamp->tv_nsec = 0;
6306
6307         return 0;
6308 }
6309
6310 static int
6311 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
6312                                 struct timespec *timestamp)
6313 {
6314         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6315         uint32_t sync_status;
6316         uint32_t tx_stmpl;
6317         uint32_t tx_stmph;
6318
6319         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
6320         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
6321                 return -EINVAL;
6322
6323         tx_stmpl = I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
6324         tx_stmph = I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
6325
6326         timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
6327         timestamp->tv_nsec = 0;
6328
6329         return 0;
6330 }