i40e: add ieee1588 timestamping
drivers/net/i40e/i40e_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"

/* Maximum number of MAC addresses */
#define I40E_NUM_MACADDR_MAX       64
#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

/* Default queue interrupt throttling time in microseconds */
#define I40E_ITR_INDEX_DEFAULT          0
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
                I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_GRST_MASK | \
                I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
                I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
                I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
                I40E_PFINT_ICR0_ENA_VFLR_MASK | \
                I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
        (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
        (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
        (1UL << RTE_ETH_FLOW_L2_PAYLOAD))

#define I40E_PTP_40GB_INCVAL  0x0199999999ULL
#define I40E_PTP_10GB_INCVAL  0x0333333333ULL
#define I40E_PTP_1GB_INCVAL   0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA  0x80000000
#define I40E_PRTTSYN_TSYNTYPE 0x0e000000

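/*
 * The INCVAL values above appear to be the per-cycle increment of the
 * PTP hardware clock in 32.32 fixed-point nanoseconds, scaled to the MAC
 * clock of each link speed: e.g. 0x0199999999 / 2^32 ~= 1.6 ns per tick
 * at 40G and 0x0333333333 / 2^32 ~= 3.2 ns at 10G. TSYNENA and TSYNTYPE
 * appear to be the enable bit and timestamp-type field of the
 * PRTTSYN_CTL1 register; this reading is inferred from the names and
 * values, not stated in this file.
 */
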
static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
                                            uint16_t queue_id,
                                            uint8_t stat_idx,
                                            uint8_t is_rx);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id,
                                int on);
static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
                                      uint16_t queue,
                                      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                                       struct rte_eth_pfc_conf *pfc_conf);
static void i40e_macaddr_add(struct rte_eth_dev *dev,
                          struct ether_addr *mac_addr,
                          uint32_t index,
                          uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
                bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
                               uint32_t hireg,
                               uint32_t loreg,
                               bool offset_loaded,
                               uint64_t *offset,
                               uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw);
static void i40e_dev_interrupt_handler(
                __rte_unused struct rte_intr_handle *handle, void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
                                uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
                        uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
                        uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
                                                struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_ethertype_filter_set(struct i40e_pf *pf,
                        struct rte_eth_ethertype_filter *filter,
                        bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
                                enum rte_filter_type filter_type,
                                enum rte_filter_op filter_op,
                                void *arg);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct i40e_hw *hw);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp,
                                           uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp);

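/*
 * The four timesync callbacks above are this patch's IEEE 1588/802.1AS
 * additions, implementing the ethdev rte_eth_timesync_* API: enable and
 * disable switch PTP timestamping on and off for the port, and the two
 * read functions retrieve the latched RX and TX timestamps of PTP
 * packets.
 */
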
static const struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
        .dev_configure                = i40e_dev_configure,
        .dev_start                    = i40e_dev_start,
        .dev_stop                     = i40e_dev_stop,
        .dev_close                    = i40e_dev_close,
        .promiscuous_enable           = i40e_dev_promiscuous_enable,
        .promiscuous_disable          = i40e_dev_promiscuous_disable,
        .allmulticast_enable          = i40e_dev_allmulticast_enable,
        .allmulticast_disable         = i40e_dev_allmulticast_disable,
        .dev_set_link_up              = i40e_dev_set_link_up,
        .dev_set_link_down            = i40e_dev_set_link_down,
        .link_update                  = i40e_dev_link_update,
        .stats_get                    = i40e_dev_stats_get,
        .stats_reset                  = i40e_dev_stats_reset,
        .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
        .dev_infos_get                = i40e_dev_info_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
        .vlan_tpid_set                = i40e_vlan_tpid_set,
        .vlan_offload_set             = i40e_vlan_offload_set,
        .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
        .vlan_pvid_set                = i40e_vlan_pvid_set,
        .rx_queue_start               = i40e_dev_rx_queue_start,
        .rx_queue_stop                = i40e_dev_rx_queue_stop,
        .tx_queue_start               = i40e_dev_tx_queue_start,
        .tx_queue_stop                = i40e_dev_tx_queue_stop,
        .rx_queue_setup               = i40e_dev_rx_queue_setup,
        .rx_queue_release             = i40e_dev_rx_queue_release,
        .rx_queue_count               = i40e_dev_rx_queue_count,
        .rx_descriptor_done           = i40e_dev_rx_descriptor_done,
        .tx_queue_setup               = i40e_dev_tx_queue_setup,
        .tx_queue_release             = i40e_dev_tx_queue_release,
        .dev_led_on                   = i40e_dev_led_on,
        .dev_led_off                  = i40e_dev_led_off,
        .flow_ctrl_set                = i40e_flow_ctrl_set,
        .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
        .mac_addr_add                 = i40e_macaddr_add,
        .mac_addr_remove              = i40e_macaddr_remove,
        .reta_update                  = i40e_dev_rss_reta_update,
        .reta_query                   = i40e_dev_rss_reta_query,
        .rss_hash_update              = i40e_dev_rss_hash_update,
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_add               = i40e_dev_udp_tunnel_add,
        .udp_tunnel_del               = i40e_dev_udp_tunnel_del,
        .filter_ctrl                  = i40e_dev_filter_ctrl,
        .mirror_rule_set              = i40e_mirror_rule_set,
        .mirror_rule_reset            = i40e_mirror_rule_reset,
        .timesync_enable              = i40e_timesync_enable,
        .timesync_disable             = i40e_timesync_disable,
        .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
};

static struct eth_driver rte_i40e_pmd = {
        .pci_drv = {
                .name = "rte_i40e_pmd",
                .id_table = pci_id_i40e_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        },
        .eth_dev_init = eth_i40e_dev_init,
        .dev_private_size = sizeof(struct i40e_adapter),
};

static inline int
i40e_align_floor(int n)
{
        if (n == 0)
                return 0;
        return (1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)));
}
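
/*
 * i40e_align_floor() rounds n down to the largest power of two not
 * greater than n by isolating the highest set bit, e.g.
 * i40e_align_floor(48) == 32 and i40e_align_floor(64) == 64.
 */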

static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                     struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                      struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}
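
/*
 * The two helpers above rely on struct rte_eth_link fitting into 64
 * bits: a single rte_atomic64_cmpset() then reads or writes the whole
 * link status atomically, and returns -1 if another thread changed the
 * value between the plain read of *dst and the compare-and-set.
 */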

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the [Poll Mode] Driver of PCI i40e devices.
 */
static int
rte_i40e_pmd_init(const char *name __rte_unused,
                  const char *params __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        rte_eth_driver_register(&rte_i40e_pmd);

        return 0;
}

static struct rte_driver rte_i40e_driver = {
        .type = PMD_PDEV,
        .init = rte_i40e_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40e_driver);

/*
 * Initialize registers for flexible payload, which should be set by NVM.
 * This should be removed from code once it is fixed in NVM.
 */
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif

static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
{
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);

        /* GLQF_PIT Registers */
        I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
        I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
}

static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev;
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi;
        int ret;
        uint32_t len;
        uint8_t aq_fail = 0;

        PMD_INIT_FUNC_TRACE();

        dev->dev_ops = &i40e_eth_dev_ops;
        dev->rx_pkt_burst = i40e_recv_pkts;
        dev->tx_pkt_burst = i40e_xmit_pkts;

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                if (dev->data->scattered_rx)
                        dev->rx_pkt_burst = i40e_recv_scattered_pkts;
                return 0;
        }
        pci_dev = dev->pci_dev;
        pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        pf->adapter->eth_dev = dev;
        pf->dev_data = dev->data;

        hw->back = I40E_PF_TO_ADAPTER(pf);
        hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
        if (!hw->hw_addr) {
                PMD_INIT_LOG(ERR, "Hardware is not available, "
                             "as address is NULL");
                return -ENODEV;
        }

        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->bus.device = pci_dev->addr.devid;
        hw->bus.func = pci_dev->addr.function;

        /* Make sure all is clean before doing PF reset */
        i40e_clear_hw(hw);

        /* Initialize the hardware */
        i40e_hw_init(hw);

        /* Reset here to make sure all is clean for each PF */
        ret = i40e_pf_reset(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
                return ret;
        }

        /* Initialize the shared code (base driver) */
        ret = i40e_init_shared_code(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
                return ret;
        }

        /*
         * To work around the NVM issue, initialize registers
         * for flexible payload by software.
         * It should be removed once issues are fixed in NVM.
         */
        i40e_flex_payload_reg_init(hw);

        /* Initialize the parameters for adminq */
        i40e_init_adminq_parameter(hw);
        ret = i40e_init_adminq(hw);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
                return -EIO;
        }
        PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
                     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
                     hw->aq.api_maj_ver, hw->aq.api_min_ver,
                     ((hw->nvm.version >> 12) & 0xf),
                     ((hw->nvm.version >> 4) & 0xff),
                     (hw->nvm.version & 0xf), hw->nvm.eetrack);

        /* Disable LLDP */
        ret = i40e_aq_stop_lldp(hw, true, NULL);
        if (ret != I40E_SUCCESS) /* Its failure can be ignored */
                PMD_INIT_LOG(INFO, "Failed to stop lldp");

        /* Clear PXE mode */
        i40e_clear_pxe_mode(hw);

        /*
         * On X710, performance falls far short of expectations on recent
         * firmware versions, and the fix may not be integrated into the
         * next firmware release, so a workaround in the software driver is
         * needed. It modifies the initial values of 3 internal-only
         * registers. The workaround can be removed once it is fixed in
         * firmware.
         */
        i40e_configure_registers(hw);

        /* Get hw capabilities */
        ret = i40e_get_cap(hw);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
                goto err_get_capabilities;
        }

        /* Initialize parameters for PF */
        ret = i40e_pf_parameter_init(dev);
        if (ret != 0) {
                PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
                goto err_parameter_init;
        }

        /* Initialize the queue management */
        ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to init queue pool");
                goto err_qp_pool_init;
        }
        ret = i40e_res_pool_init(&pf->msix_pool, 1,
                                hw->func_caps.num_msix_vectors - 1);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
                goto err_msix_pool_init;
        }

        /* Initialize lan hmc */
        ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
                                hw->func_caps.num_rx_qp, 0, 0);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
                goto err_init_lan_hmc;
        }

        /* Configure lan hmc */
        ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
                goto err_configure_lan_hmc;
        }

        /* Get and check the mac address */
        i40e_get_mac_addr(hw, hw->mac.addr);
        if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "mac address is not valid");
                ret = -EIO;
                goto err_get_mac_addr;
        }
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.addr,
                        (struct ether_addr *) hw->mac.perm_addr);

        /* Disable flow control */
        hw->fc.requested_mode = I40E_FC_NONE;
        i40e_set_fc(hw, &aq_fail, TRUE);

        /* PF setup, which includes VSI setup */
        ret = i40e_pf_setup(pf);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
                goto err_setup_pf_switch;
        }

        vsi = pf->main_vsi;

        /* Disable double vlan by default */
        i40e_vsi_config_double_vlan(vsi, FALSE);

        if (!vsi->max_macaddrs)
                len = ETHER_ADDR_LEN;
        else
                len = ETHER_ADDR_LEN * vsi->max_macaddrs;

        /* Should be after VSI initialized */
        dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
        if (!dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR, "Failed to allocate memory "
                                        "for storing mac address");
                ret = -ENOMEM;
                goto err_mac_alloc;
        }
        ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
                                        &dev->data->mac_addrs[0]);

        /* initialize pf host driver to setup SRIOV resource if applicable */
        i40e_pf_host_init(dev);

        /* register callback func to eal lib */
        rte_intr_callback_register(&(pci_dev->intr_handle),
                i40e_dev_interrupt_handler, (void *)dev);

        /* configure and enable device interrupt */
        i40e_pf_config_irq0(hw);
        i40e_pf_enable_irq0(hw);

        /* enable uio intr after callback register */
        rte_intr_enable(&(pci_dev->intr_handle));

        /* initialize mirror rule list */
        TAILQ_INIT(&pf->mirror_list);

        return 0;

err_mac_alloc:
        i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
err_get_mac_addr:
err_configure_lan_hmc:
        (void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
        i40e_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
        i40e_res_pool_destroy(&pf->qp_pool);
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
        (void)i40e_shutdown_adminq(hw);

        return ret;
}

static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        int ret;

        if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
                ret = i40e_fdir_setup(pf);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Failed to setup flow director.");
                        return -ENOTSUP;
                }
                ret = i40e_fdir_configure(dev);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to configure fdir.");
                        goto err;
                }
        } else
                i40e_fdir_teardown(pf);

        ret = i40e_dev_init_vlan(dev);
        if (ret < 0)
                goto err;

        /* VMDQ setup.
         *  VMDQ setting needs to move out of i40e_pf_config_mq_rx(), as VMDQ
         *  and RSS settings have different requirements.
         *  The general PMD call sequence is NIC init, configure,
         *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() will look up
         *  the VSI that a specific queue belongs to if VMDQ is applicable, so
         *  the VMDQ setting has to be done before rx/tx_queue_setup(), which
         *  makes this function a good place for vmdq_setup.
         *  The RSS setting needs the actual number of configured RX queues,
         *  which is only available after rx_queue_setup(), so dev_start() is
         *  a good place for the RSS setup.
         */
        if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
                ret = i40e_vmdq_setup(dev);
                if (ret)
                        goto err;
        }
        return 0;
err:
        i40e_fdir_teardown(pf);
        return ret;
}

void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
        uint16_t i;

        for (i = 0; i < vsi->nb_qps; i++) {
                I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
                I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
                rte_wmb();
        }

        if (vsi->type != I40E_VSI_SRIOV) {
                I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
                I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
                                msix_vect - 1), 0);
        } else {
                uint32_t reg;
                reg = (hw->func_caps.num_msix_vectors_vf - 1) *
                        vsi->user_param + (msix_vect - 1);

                I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
        }
        I40E_WRITE_FLUSH(hw);
}

static inline uint16_t
i40e_calc_itr_interval(int16_t interval)
{
        if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
                interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;

        /* Convert to hardware count, as writing each 1 represents 2 us */
        return (interval / 2);
}
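
/*
 * A negative or out-of-range request falls back to the 32 us default,
 * and the result is halved because the hardware ITR registers count in
 * units of 2 us: e.g. i40e_calc_itr_interval(32) == 16, i.e. a 32 us
 * throttling interval.
 */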

void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
        uint32_t val;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
        int i;

        for (i = 0; i < vsi->nb_qps; i++)
                I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);

        /* Bind all RX queues to allocated MSIX interrupt */
        for (i = 0; i < vsi->nb_qps; i++) {
                val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
                        I40E_QINT_RQCTL_ITR_INDX_MASK |
                        ((vsi->base_queue + i + 1) <<
                        I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
                        (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
                        I40E_QINT_RQCTL_CAUSE_ENA_MASK;

                if (i == vsi->nb_qps - 1)
                        val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
                I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
        }

        /* Write first RX queue to Link list register as the head element */
        if (vsi->type != I40E_VSI_SRIOV) {
                uint16_t interval =
                        i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);

                I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
                                                (vsi->base_queue <<
                                I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
                        (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));

                I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
                                                msix_vect - 1), interval);

#ifndef I40E_GLINT_CTL
#define I40E_GLINT_CTL                     0x0003F800
#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
#endif
                /* Disable auto-masking on enabling of any non-zero interrupt */
                I40E_WRITE_REG(hw, I40E_GLINT_CTL,
                        I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
        } else {
                uint32_t reg;

                /* num_msix_vectors_vf includes IRQ0, so subtract 1 */
                reg = (hw->func_caps.num_msix_vectors_vf - 1) *
                        vsi->user_param + (msix_vect - 1);

                I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
                                        I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
                                (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
        }

        I40E_WRITE_FLUSH(hw);
}
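
/*
 * The mapping programmed above forms a hardware linked list: the
 * (V)PINT_LNKLSTN register of the MSI-X vector points at the VSI's first
 * RX queue, each QINT_RQCTL entry names the next queue via NEXTQ_INDX,
 * and the last entry is terminated with the NEXTQ_INDX mask, so all
 * queues of the VSI are chained onto the single vector allocated to it.
 */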

static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t interval = i40e_calc_itr_interval(\
                        RTE_LIBRTE_I40E_ITR_INTERVAL);

        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
                                        I40E_PFINT_DYN_CTLN_INTENA_MASK |
                                        I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
                        (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
}

static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
}

static inline uint8_t
i40e_parse_link_speed(uint16_t eth_link_speed)
{
        uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;

        switch (eth_link_speed) {
        case ETH_LINK_SPEED_40G:
                link_speed = I40E_LINK_SPEED_40GB;
                break;
        case ETH_LINK_SPEED_20G:
                link_speed = I40E_LINK_SPEED_20GB;
                break;
        case ETH_LINK_SPEED_10G:
                link_speed = I40E_LINK_SPEED_10GB;
                break;
        case ETH_LINK_SPEED_1000:
                link_speed = I40E_LINK_SPEED_1GB;
                break;
        case ETH_LINK_SPEED_100:
                link_speed = I40E_LINK_SPEED_100MB;
                break;
        }

        return link_speed;
}

static int
i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
{
        enum i40e_status_code status;
        struct i40e_aq_get_phy_abilities_resp phy_ab;
        struct i40e_aq_set_phy_config phy_conf;
        const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
                        I40E_AQ_PHY_FLAG_PAUSE_RX |
                        I40E_AQ_PHY_FLAG_LOW_POWER;
        const uint8_t advt = I40E_LINK_SPEED_40GB |
                        I40E_LINK_SPEED_10GB |
                        I40E_LINK_SPEED_1GB |
                        I40E_LINK_SPEED_100MB;
        int ret = -ENOTSUP;

        /* Skip it on 40G interfaces, as a workaround for the link issue */
        if (i40e_is_40G_device(hw->device_id))
                return I40E_SUCCESS;

        status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
                                              NULL);
        if (status)
                return ret;

        memset(&phy_conf, 0, sizeof(phy_conf));

        /* bits 0-2 use the values from get_phy_abilities_resp */
        abilities &= ~mask;
        abilities |= phy_ab.abilities & mask;

        /* update abilities and speed */
        if (abilities & I40E_AQ_PHY_AN_ENABLED)
                phy_conf.link_speed = advt;
        else
                phy_conf.link_speed = force_speed;

        phy_conf.abilities = abilities;

        /* use get_phy_abilities_resp value for the rest */
        phy_conf.phy_type = phy_ab.phy_type;
        phy_conf.eee_capability = phy_ab.eee_capability;
        phy_conf.eeer = phy_ab.eeer_val;
        phy_conf.low_power_ctrl = phy_ab.d3_lpan;

        PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
                    phy_ab.abilities, phy_ab.link_speed);
        PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
                    phy_conf.abilities, phy_conf.link_speed);

        status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
        if (status)
                return ret;

        return I40E_SUCCESS;
}
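
/*
 * i40e_phy_conf_link() is a read-modify-write of the PHY configuration
 * through the admin queue: the pause and low-power bits are preserved
 * from the get_phy_capabilities response, the advertised speeds become
 * either the full 'advt' set (when autonegotiation is enabled) or the
 * single forced speed, and the remaining fields are copied back
 * unchanged.
 */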

static int
i40e_apply_link_speed(struct rte_eth_dev *dev)
{
        uint8_t speed;
        uint8_t abilities = 0;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *conf = &dev->data->dev_conf;

        speed = i40e_parse_link_speed(conf->link_speed);
        abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
        if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
                abilities |= I40E_AQ_PHY_AN_ENABLED;
        else
                abilities |= I40E_AQ_PHY_LINK_ENABLED;

        return i40e_phy_conf_link(hw, abilities, speed);
}

static int
i40e_dev_start(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *main_vsi = pf->main_vsi;
        int ret, i;

        if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
                (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
                PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
                             dev->data->dev_conf.link_duplex,
                             dev->data->port_id);
                return -EINVAL;
        }

        /* Initialize VSI */
        ret = i40e_dev_rxtx_init(pf);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
                goto err_up;
        }

        /* Map queues with MSIX interrupt */
        i40e_vsi_queues_bind_intr(main_vsi);
        i40e_vsi_enable_queues_intr(main_vsi);

        /* Map VMDQ VSI queues with MSIX interrupt */
        for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
                i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
                i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
        }

        /* enable FDIR MSIX interrupt */
        if (pf->fdir.fdir_vsi) {
                i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
                i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
        }

        /* Enable all queues which have been configured */
        ret = i40e_dev_switch_queues(pf, TRUE);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to enable VSI");
                goto err_up;
        }

        /* Enable receiving broadcast packets */
        ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
        if (ret != I40E_SUCCESS)
                PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");

        for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
                ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
                                                true, NULL);
                if (ret != I40E_SUCCESS)
                        PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
        }

        /* Apply link configuration */
        ret = i40e_apply_link_speed(dev);
        if (I40E_SUCCESS != ret) {
                PMD_DRV_LOG(ERR, "Failed to apply link setting");
                goto err_up;
        }

        return I40E_SUCCESS;

err_up:
        i40e_dev_switch_queues(pf, FALSE);
        i40e_dev_clear_queues(dev);

        return ret;
}

static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_vsi *main_vsi = pf->main_vsi;
        struct i40e_mirror_rule *p_mirror;
        int i;

        /* Disable all queues */
        i40e_dev_switch_queues(pf, FALSE);

        /* un-map queues with interrupt registers */
        i40e_vsi_disable_queues_intr(main_vsi);
        i40e_vsi_queues_unbind_intr(main_vsi);

        for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
                i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
                i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
        }

        /* disable and un-map the FDIR MSIX interrupt as well */
        if (pf->fdir.fdir_vsi) {
                i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
                i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
        }
        /* Clear all queues and release memory */
        i40e_dev_clear_queues(dev);

        /* Set link down */
        i40e_dev_set_link_down(dev);

        /* Remove all mirror rules */
        while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
                TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
                rte_free(p_mirror);
        }
        pf->nb_mirror_rule = 0;
}

static void
i40e_dev_close(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t reg;

        PMD_INIT_FUNC_TRACE();

        i40e_dev_stop(dev);

        /* Disable interrupt */
        i40e_pf_disable_irq0(hw);
        rte_intr_disable(&(dev->pci_dev->intr_handle));

        /* shutdown and destroy the HMC */
        i40e_shutdown_lan_hmc(hw);

        /* release all the existing VSIs and VEBs */
        i40e_fdir_teardown(pf);
        i40e_vsi_release(pf->main_vsi);

        /* shutdown the adminq */
        i40e_aq_queue_shutdown(hw, true);
        i40e_shutdown_adminq(hw);

        i40e_res_pool_destroy(&pf->qp_pool);
        i40e_res_pool_destroy(&pf->msix_pool);

        /* force a PF reset to clean anything leftover */
        reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
        I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
                        (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
        I40E_WRITE_FLUSH(hw);
}

static void
i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        int status;

        status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
                                                        true, NULL);
        if (status != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");

        status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
                                                        TRUE, NULL);
        if (status != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
}

static void
i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        int status;

        status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
                                                        false, NULL);
        if (status != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");

        status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
                                                        false, NULL);
        if (status != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
}

static void
i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        int ret;

        ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
        if (ret != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
}

static void
i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        int ret;

        if (dev->data->promiscuous == 1)
                return; /* must remain in all_multicast mode */

        ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
                                vsi->seid, FALSE, NULL);
        if (ret != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
}

/*
 * Set device link up.
 */
static int
i40e_dev_set_link_up(struct rte_eth_dev *dev)
{
        /* re-apply link speed setting */
        return i40e_apply_link_speed(dev);
}

/*
 * Set device link down.
 */
static int
i40e_dev_set_link_down(struct rte_eth_dev *dev)
{
        uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
        uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        return i40e_phy_conf_link(hw, abilities, speed);
}

int
i40e_dev_link_update(struct rte_eth_dev *dev,
                     int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_link_status link_status;
        struct rte_eth_link link, old;
        int status;
        unsigned rep_cnt = MAX_REPEAT_TIME;

        memset(&link, 0, sizeof(link));
        memset(&old, 0, sizeof(old));
        memset(&link_status, 0, sizeof(link_status));
        rte_i40e_dev_atomic_read_link_status(dev, &old);

        do {
                /* Get link status information from hardware */
                status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
                if (status != I40E_SUCCESS) {
                        link.link_speed = ETH_LINK_SPEED_100;
                        link.link_duplex = ETH_LINK_FULL_DUPLEX;
                        PMD_DRV_LOG(ERR, "Failed to get link info");
                        goto out;
                }

                link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
                if (!wait_to_complete)
                        break;

                rte_delay_ms(CHECK_INTERVAL);
        } while (!link.link_status && rep_cnt--);

        if (!link.link_status)
                goto out;

        /* i40e uses full duplex only */
        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        /* Parse the link status */
        switch (link_status.link_speed) {
        case I40E_LINK_SPEED_100MB:
                link.link_speed = ETH_LINK_SPEED_100;
                break;
        case I40E_LINK_SPEED_1GB:
                link.link_speed = ETH_LINK_SPEED_1000;
                break;
        case I40E_LINK_SPEED_10GB:
                link.link_speed = ETH_LINK_SPEED_10G;
                break;
        case I40E_LINK_SPEED_20GB:
                link.link_speed = ETH_LINK_SPEED_20G;
                break;
        case I40E_LINK_SPEED_40GB:
                link.link_speed = ETH_LINK_SPEED_40G;
                break;
        default:
                link.link_speed = ETH_LINK_SPEED_100;
                break;
        }

out:
        rte_i40e_dev_atomic_write_link_status(dev, &link);
        if (link.link_status == old.link_status)
                return -1;

        return 0;
}
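
/*
 * Note: following the convention used by this and other Intel PMDs,
 * i40e_dev_link_update() returns 0 when the link status changed since
 * the previous call and -1 when it did not; with wait_to_complete set it
 * polls for up to 1 s (10 x 100 ms) for the link to come up.
 */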

/* Get all the statistics of a VSI */
void
i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
        struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
        struct i40e_eth_stats *nes = &vsi->eth_stats;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);

        i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
                            vsi->offset_loaded, &oes->rx_bytes,
                            &nes->rx_bytes);
        i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
                            vsi->offset_loaded, &oes->rx_unicast,
                            &nes->rx_unicast);
        i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
                            vsi->offset_loaded, &oes->rx_multicast,
                            &nes->rx_multicast);
        i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
                            vsi->offset_loaded, &oes->rx_broadcast,
                            &nes->rx_broadcast);
        i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
                            &oes->rx_discards, &nes->rx_discards);
        /* GLV_REPC not supported */
        /* GLV_RMPC not supported */
        i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
                            &oes->rx_unknown_protocol,
                            &nes->rx_unknown_protocol);
        i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
                            vsi->offset_loaded, &oes->tx_bytes,
                            &nes->tx_bytes);
        i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
                            vsi->offset_loaded, &oes->tx_unicast,
                            &nes->tx_unicast);
        i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
                            vsi->offset_loaded, &oes->tx_multicast,
                            &nes->tx_multicast);
        i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
                            vsi->offset_loaded, &oes->tx_broadcast,
                            &nes->tx_broadcast);
        /* GLV_TDPC not supported */
        i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
                            &oes->tx_errors, &nes->tx_errors);
        vsi->offset_loaded = true;

        PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
                    vsi->vsi_id);
        PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
        PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
        PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
        PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
        PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
        PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
                    nes->rx_unknown_protocol);
        PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
        PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
        PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
        PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
        PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
        PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
        PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
                    vsi->vsi_id);
}
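
/*
 * The i40e_stat_update_32/48 helpers used above read free-running
 * hardware counters: on the first call after init (offset_loaded ==
 * false) the current register value is latched into *offset, and later
 * calls report the value relative to that snapshot, so the stats appear
 * to start from zero. The 48-bit variant combines a high and a low
 * register; its definition (later in this file) is presumed to also
 * handle counter wrap-around relative to the stored offset.
 */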

/* Get all statistics of a port */
static void
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        uint32_t i;
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
        struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */

        /* Get statistics of struct i40e_eth_stats */
        i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
                            I40E_GLPRT_GORCL(hw->port),
                            pf->offset_loaded, &os->eth.rx_bytes,
                            &ns->eth.rx_bytes);
        i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
                            I40E_GLPRT_UPRCL(hw->port),
                            pf->offset_loaded, &os->eth.rx_unicast,
                            &ns->eth.rx_unicast);
        i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
                            I40E_GLPRT_MPRCL(hw->port),
                            pf->offset_loaded, &os->eth.rx_multicast,
                            &ns->eth.rx_multicast);
        i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
                            I40E_GLPRT_BPRCL(hw->port),
                            pf->offset_loaded, &os->eth.rx_broadcast,
                            &ns->eth.rx_broadcast);
        i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
                            pf->offset_loaded, &os->eth.rx_discards,
                            &ns->eth.rx_discards);
        /* GLPRT_REPC not supported */
        /* GLPRT_RMPC not supported */
        i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
                            pf->offset_loaded,
                            &os->eth.rx_unknown_protocol,
                            &ns->eth.rx_unknown_protocol);
        i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
                            I40E_GLPRT_GOTCL(hw->port),
                            pf->offset_loaded, &os->eth.tx_bytes,
                            &ns->eth.tx_bytes);
        i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
                            I40E_GLPRT_UPTCL(hw->port),
                            pf->offset_loaded, &os->eth.tx_unicast,
                            &ns->eth.tx_unicast);
        i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
                            I40E_GLPRT_MPTCL(hw->port),
                            pf->offset_loaded, &os->eth.tx_multicast,
                            &ns->eth.tx_multicast);
        i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
                            I40E_GLPRT_BPTCL(hw->port),
                            pf->offset_loaded, &os->eth.tx_broadcast,
                            &ns->eth.tx_broadcast);
        /* GLPRT_TEPC not supported */

        /* additional port specific stats */
        i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
                            pf->offset_loaded, &os->tx_dropped_link_down,
                            &ns->tx_dropped_link_down);
        i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
                            pf->offset_loaded, &os->crc_errors,
                            &ns->crc_errors);
        i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
                            pf->offset_loaded, &os->illegal_bytes,
                            &ns->illegal_bytes);
        /* GLPRT_ERRBC not supported */
        i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
                            pf->offset_loaded, &os->mac_local_faults,
                            &ns->mac_local_faults);
        i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
                            pf->offset_loaded, &os->mac_remote_faults,
                            &ns->mac_remote_faults);
        i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
                            pf->offset_loaded, &os->rx_length_errors,
                            &ns->rx_length_errors);
        i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
                            pf->offset_loaded, &os->link_xon_rx,
                            &ns->link_xon_rx);
        i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
                            pf->offset_loaded, &os->link_xoff_rx,
                            &ns->link_xoff_rx);
        for (i = 0; i < 8; i++) {
                i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
                                    pf->offset_loaded,
                                    &os->priority_xon_rx[i],
                                    &ns->priority_xon_rx[i]);
                i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
                                    pf->offset_loaded,
                                    &os->priority_xoff_rx[i],
                                    &ns->priority_xoff_rx[i]);
        }
        i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
                            pf->offset_loaded, &os->link_xon_tx,
                            &ns->link_xon_tx);
        i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
                            pf->offset_loaded, &os->link_xoff_tx,
                            &ns->link_xoff_tx);
1353         for (i = 0; i < 8; i++) {
1354                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1355                                     pf->offset_loaded,
1356                                     &os->priority_xon_tx[i],
1357                                     &ns->priority_xon_tx[i]);
1358                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1359                                     pf->offset_loaded,
1360                                     &os->priority_xoff_tx[i],
1361                                     &ns->priority_xoff_tx[i]);
1362                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1363                                     pf->offset_loaded,
1364                                     &os->priority_xon_2_xoff[i],
1365                                     &ns->priority_xon_2_xoff[i]);
1366         }
1367         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
1368                             I40E_GLPRT_PRC64L(hw->port),
1369                             pf->offset_loaded, &os->rx_size_64,
1370                             &ns->rx_size_64);
1371         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
1372                             I40E_GLPRT_PRC127L(hw->port),
1373                             pf->offset_loaded, &os->rx_size_127,
1374                             &ns->rx_size_127);
1375         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
1376                             I40E_GLPRT_PRC255L(hw->port),
1377                             pf->offset_loaded, &os->rx_size_255,
1378                             &ns->rx_size_255);
1379         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
1380                             I40E_GLPRT_PRC511L(hw->port),
1381                             pf->offset_loaded, &os->rx_size_511,
1382                             &ns->rx_size_511);
1383         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
1384                             I40E_GLPRT_PRC1023L(hw->port),
1385                             pf->offset_loaded, &os->rx_size_1023,
1386                             &ns->rx_size_1023);
1387         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
1388                             I40E_GLPRT_PRC1522L(hw->port),
1389                             pf->offset_loaded, &os->rx_size_1522,
1390                             &ns->rx_size_1522);
1391         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
1392                             I40E_GLPRT_PRC9522L(hw->port),
1393                             pf->offset_loaded, &os->rx_size_big,
1394                             &ns->rx_size_big);
1395         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
1396                             pf->offset_loaded, &os->rx_undersize,
1397                             &ns->rx_undersize);
1398         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
1399                             pf->offset_loaded, &os->rx_fragments,
1400                             &ns->rx_fragments);
1401         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
1402                             pf->offset_loaded, &os->rx_oversize,
1403                             &ns->rx_oversize);
1404         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
1405                             pf->offset_loaded, &os->rx_jabber,
1406                             &ns->rx_jabber);
1407         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
1408                             I40E_GLPRT_PTC64L(hw->port),
1409                             pf->offset_loaded, &os->tx_size_64,
1410                             &ns->tx_size_64);
1411         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
1412                             I40E_GLPRT_PTC127L(hw->port),
1413                             pf->offset_loaded, &os->tx_size_127,
1414                             &ns->tx_size_127);
1415         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
1416                             I40E_GLPRT_PTC255L(hw->port),
1417                             pf->offset_loaded, &os->tx_size_255,
1418                             &ns->tx_size_255);
1419         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
1420                             I40E_GLPRT_PTC511L(hw->port),
1421                             pf->offset_loaded, &os->tx_size_511,
1422                             &ns->tx_size_511);
1423         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
1424                             I40E_GLPRT_PTC1023L(hw->port),
1425                             pf->offset_loaded, &os->tx_size_1023,
1426                             &ns->tx_size_1023);
1427         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
1428                             I40E_GLPRT_PTC1522L(hw->port),
1429                             pf->offset_loaded, &os->tx_size_1522,
1430                             &ns->tx_size_1522);
1431         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
1432                             I40E_GLPRT_PTC9522L(hw->port),
1433                             pf->offset_loaded, &os->tx_size_big,
1434                             &ns->tx_size_big);
1435         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
1436                            pf->offset_loaded,
1437                            &os->fd_sb_match, &ns->fd_sb_match);
1438         /* GLPRT_MSPDC not supported */
1439         /* GLPRT_XEC not supported */
1440
1441         pf->offset_loaded = true;
1442
1443         if (pf->main_vsi)
1444                 i40e_update_vsi_stats(pf->main_vsi);
1445
1446         stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
1447                                                 ns->eth.rx_broadcast;
1448         stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
1449                                                 ns->eth.tx_broadcast;
1450         stats->ibytes   = ns->eth.rx_bytes;
1451         stats->obytes   = ns->eth.tx_bytes;
1452         stats->oerrors  = ns->eth.tx_errors;
1453         stats->imcasts  = ns->eth.rx_multicast;
1454         stats->fdirmatch = ns->fd_sb_match;
1455
1456         /* Rx Errors */
1457         stats->ibadcrc  = ns->crc_errors;
1458         stats->ibadlen  = ns->rx_length_errors + ns->rx_undersize +
1459                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
1460         stats->imissed  = ns->eth.rx_discards;
1461         stats->ierrors  = stats->ibadcrc + stats->ibadlen + stats->imissed;
1462
1463         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
1464         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
1465         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
1466         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
1467         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
1468         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
1469         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
1470                     ns->eth.rx_unknown_protocol);
1471         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
1472         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
1473         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
1474         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
1475         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
1476         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
1477
1478         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
1479                     ns->tx_dropped_link_down);
1480         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
1481         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
1482                     ns->illegal_bytes);
1483         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
1484         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
1485                     ns->mac_local_faults);
1486         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
1487                     ns->mac_remote_faults);
1488         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
1489                     ns->rx_length_errors);
1490         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
1491         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
1492         for (i = 0; i < 8; i++) {
1493                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
1494                                 i, ns->priority_xon_rx[i]);
1495                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
1496                                 i, ns->priority_xoff_rx[i]);
1497         }
1498         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
1499         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
1500         for (i = 0; i < 8; i++) {
1501                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
1502                                 i, ns->priority_xon_tx[i]);
1503                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
1504                                 i, ns->priority_xoff_tx[i]);
1505                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
1506                                 i, ns->priority_xon_2_xoff[i]);
1507         }
1508         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
1509         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
1510         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
1511         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
1512         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
1513         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
1514         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
1515         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
1516         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
1517         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
1518         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
1519         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
1520         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
1521         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
1522         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
1523         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
1524         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
1525         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
1526         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
1527                         ns->mac_short_packet_dropped);
1528         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
1529                     ns->checksum_error);
1530         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
1531         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
1532 }
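
/*
 * Usage sketch (illustrative only, not part of the driver): the stats
 * path above is reached through the generic ethdev API, and a reset
 * simply clears pf->offset_loaded so the current counters become the
 * new baseline on the next read.
 *
 *     struct rte_eth_stats stats;
 *
 *     rte_eth_stats_reset(port_id);        // re-baseline the counters
 *     // ... run traffic ...
 *     rte_eth_stats_get(port_id, &stats);  // deltas since the reset
 *     printf("rx=%" PRIu64 " tx=%" PRIu64 "\n",
 *            stats.ipackets, stats.opackets);
 */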
1533
1534 /* Reset the statistics */
1535 static void
1536 i40e_dev_stats_reset(struct rte_eth_dev *dev)
1537 {
1538         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1539
1540         /* This forces the start point of each counter to be reloaded on the next read */
1541         pf->offset_loaded = false;
1542 }
1543
1544 static int
1545 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
1546                                  __rte_unused uint16_t queue_id,
1547                                  __rte_unused uint8_t stat_idx,
1548                                  __rte_unused uint8_t is_rx)
1549 {
1550         PMD_INIT_FUNC_TRACE();
1551
1552         return -ENOSYS;
1553 }
1554
1555 static void
1556 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1557 {
1558         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1559         struct i40e_vsi *vsi = pf->main_vsi;
1560
1561         dev_info->max_rx_queues = vsi->nb_qps;
1562         dev_info->max_tx_queues = vsi->nb_qps;
1563         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1564         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1565         dev_info->max_mac_addrs = vsi->max_macaddrs;
1566         dev_info->max_vfs = dev->pci_dev->max_vfs;
1567         dev_info->rx_offload_capa =
1568                 DEV_RX_OFFLOAD_VLAN_STRIP |
1569                 DEV_RX_OFFLOAD_QINQ_STRIP |
1570                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1571                 DEV_RX_OFFLOAD_UDP_CKSUM |
1572                 DEV_RX_OFFLOAD_TCP_CKSUM;
1573         dev_info->tx_offload_capa =
1574                 DEV_TX_OFFLOAD_VLAN_INSERT |
1575                 DEV_TX_OFFLOAD_QINQ_INSERT |
1576                 DEV_TX_OFFLOAD_IPV4_CKSUM |
1577                 DEV_TX_OFFLOAD_UDP_CKSUM |
1578                 DEV_TX_OFFLOAD_TCP_CKSUM |
1579                 DEV_TX_OFFLOAD_SCTP_CKSUM |
1580                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1581                 DEV_TX_OFFLOAD_TCP_TSO;
1582         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
1583                                                 sizeof(uint32_t);
1584         dev_info->reta_size = pf->hash_lut_size;
1585         dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
1586
1587         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1588                 .rx_thresh = {
1589                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
1590                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
1591                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
1592                 },
1593                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
1594                 .rx_drop_en = 0,
1595         };
1596
1597         dev_info->default_txconf = (struct rte_eth_txconf) {
1598                 .tx_thresh = {
1599                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
1600                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
1601                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
1602                 },
1603                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
1604                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
1605                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
1606                                 ETH_TXQ_FLAGS_NOOFFLOADS,
1607         };
1608
1609         if (pf->flags & I40E_FLAG_VMDQ) {
1610                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
1611                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
1612                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
1613                                                 pf->max_nb_vmdq_vsi;
1614                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
1615                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
1616                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
1617         }
1618 }
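
/*
 * Usage sketch (illustrative only): an application typically sizes its
 * queue configuration from the limits reported above.
 *
 *     struct rte_eth_dev_info info;
 *
 *     rte_eth_dev_info_get(port_id, &info);
 *     nb_rxq = RTE_MIN(nb_rxq, info.max_rx_queues);
 *     nb_txq = RTE_MIN(nb_txq, info.max_tx_queues);
 */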
1619
1620 static int
1621 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1622 {
1623         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1624         struct i40e_vsi *vsi = pf->main_vsi;
1625         PMD_INIT_FUNC_TRACE();
1626
1627         if (on)
1628                 return i40e_vsi_add_vlan(vsi, vlan_id);
1629         else
1630                 return i40e_vsi_delete_vlan(vsi, vlan_id);
1631 }
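
/*
 * Usage sketch (illustrative only, assuming VLAN filtering was enabled
 * in rxmode): the generic API below lands in i40e_vlan_filter_set().
 *
 *     rte_eth_dev_vlan_filter(port_id, 100, 1);  // accept VLAN ID 100
 *     rte_eth_dev_vlan_filter(port_id, 100, 0);  // stop accepting it
 */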
1632
1633 static void
1634 i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
1635                    __rte_unused uint16_t tpid)
1636 {
1637         PMD_INIT_FUNC_TRACE();
1638 }
1639
1640 static void
1641 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1642 {
1643         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1644         struct i40e_vsi *vsi = pf->main_vsi;
1645
1646         if (mask & ETH_VLAN_STRIP_MASK) {
1647                 /* Enable or disable VLAN stripping */
1648                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1649                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
1650                 else
1651                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
1652         }
1653
1654         if (mask & ETH_VLAN_EXTEND_MASK) {
1655                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1656                         i40e_vsi_config_double_vlan(vsi, TRUE);
1657                 else
1658                         i40e_vsi_config_double_vlan(vsi, FALSE);
1659         }
1660 }
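
/*
 * Usage sketch (illustrative only): at the application level the mask
 * handled above is normally driven through the VLAN offload API, e.g.
 * to enable stripping:
 *
 *     int mask = rte_eth_dev_get_vlan_offload(port_id);
 *     rte_eth_dev_set_vlan_offload(port_id, mask | ETH_VLAN_STRIP_OFFLOAD);
 */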
1661
1662 static void
1663 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
1664                           __rte_unused uint16_t queue,
1665                           __rte_unused int on)
1666 {
1667         PMD_INIT_FUNC_TRACE();
1668 }
1669
1670 static int
1671 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1672 {
1673         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1674         struct i40e_vsi *vsi = pf->main_vsi;
1675         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1676         struct i40e_vsi_vlan_pvid_info info;
1677
1678         memset(&info, 0, sizeof(info));
1679         info.on = on;
1680         if (info.on)
1681                 info.config.pvid = pvid;
1682         else {
1683                 info.config.reject.tagged =
1684                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
1685                 info.config.reject.untagged =
1686                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
1687         }
1688
1689         return i40e_vsi_vlan_pvid_set(vsi, &info);
1690 }
1691
1692 static int
1693 i40e_dev_led_on(struct rte_eth_dev *dev)
1694 {
1695         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1696         uint32_t mode = i40e_led_get(hw);
1697
1698         if (mode == 0)
1699                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
1700
1701         return 0;
1702 }
1703
1704 static int
1705 i40e_dev_led_off(struct rte_eth_dev *dev)
1706 {
1707         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1708         uint32_t mode = i40e_led_get(hw);
1709
1710         if (mode != 0)
1711                 i40e_led_set(hw, 0, false);
1712
1713         return 0;
1714 }
1715
1716 static int
1717 i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1718                    __rte_unused struct rte_eth_fc_conf *fc_conf)
1719 {
1720         PMD_INIT_FUNC_TRACE();
1721
1722         return -ENOSYS;
1723 }
1724
1725 static int
1726 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1727                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
1728 {
1729         PMD_INIT_FUNC_TRACE();
1730
1731         return -ENOSYS;
1732 }
1733
1734 /* Add a MAC address, and update filters */
1735 static void
1736 i40e_macaddr_add(struct rte_eth_dev *dev,
1737                  struct ether_addr *mac_addr,
1738                  __rte_unused uint32_t index,
1739                  uint32_t pool)
1740 {
1741         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1742         struct i40e_mac_filter_info mac_filter;
1743         struct i40e_vsi *vsi;
1744         int ret;
1745
1746         /* If VMDQ not enabled or configured, return */
1747         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) {
1748                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
1749                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
1750                         pool);
1751                 return;
1752         }
1753
1754         if (pool > pf->nb_cfg_vmdq_vsi) {
1755                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
1756                                 pool, pf->nb_cfg_vmdq_vsi);
1757                 return;
1758         }
1759
1760         (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
1761         mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
1762
1763         if (pool == 0)
1764                 vsi = pf->main_vsi;
1765         else
1766                 vsi = pf->vmdq[pool - 1].vsi;
1767
1768         ret = i40e_vsi_add_mac(vsi, &mac_filter);
1769         if (ret != I40E_SUCCESS) {
1770                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
1771                 return;
1772         }
1773 }
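
/*
 * Usage sketch (illustrative only): adding a locally administered MAC
 * address to pool 0 (the main VSI) through the generic ethdev API.
 *
 *     struct ether_addr addr = {
 *             .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *     };
 *
 *     rte_eth_dev_mac_addr_add(port_id, &addr, 0);  // pool 0 = main VSI
 */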
1774
1775 /* Remove a MAC address, and update filters */
1776 static void
1777 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1778 {
1779         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1780         struct i40e_vsi *vsi;
1781         struct rte_eth_dev_data *data = dev->data;
1782         struct ether_addr *macaddr;
1783         int ret;
1784         uint32_t i;
1785         uint64_t pool_sel;
1786
1787         macaddr = &(data->mac_addrs[index]);
1788
1789         pool_sel = dev->data->mac_pool_sel[index];
1790
1791         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
1792                 if (pool_sel & (1ULL << i)) {
1793                         if (i == 0)
1794                                 vsi = pf->main_vsi;
1795                         else {
1796                                 /* No VMDQ pool enabled or configured */
1797                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
1798                                         (i > pf->nb_cfg_vmdq_vsi)) {
1799                                         PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
1800                                                         "/configured");
1801                                         return;
1802                                 }
1803                                 vsi = pf->vmdq[i - 1].vsi;
1804                         }
1805                         ret = i40e_vsi_delete_mac(vsi, macaddr);
1806
1807                         if (ret) {
1808                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
1809                                 return;
1810                         }
1811                 }
1812         }
1813 }
1814
1815 /* Set perfect match or hash match of MAC and VLAN for a VF */
1816 static int
1817 i40e_vf_mac_filter_set(struct i40e_pf *pf,
1818                  struct rte_eth_mac_filter *filter,
1819                  bool add)
1820 {
1821         struct i40e_hw *hw;
1822         struct i40e_mac_filter_info mac_filter;
1823         struct ether_addr old_mac;
1824         struct ether_addr *new_mac;
1825         struct i40e_pf_vf *vf = NULL;
1826         uint16_t vf_id;
1827         int ret;
1828
1829         if (pf == NULL) {
1830                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
1831                 return -EINVAL;
1832         }
1833         hw = I40E_PF_TO_HW(pf);
1834
1835         if (filter == NULL) {
1836                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
1837                 return -EINVAL;
1838         }
1839
1840         new_mac = &filter->mac_addr;
1841
1842         if (is_zero_ether_addr(new_mac)) {
1843                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
1844                 return -EINVAL;
1845         }
1846
1847         vf_id = filter->dst_id;
1848
1849         if (!pf->vfs || vf_id >= pf->vf_num) {
1850                 PMD_DRV_LOG(ERR, "Invalid argument.");
1851                 return -EINVAL;
1852         }
1853         vf = &pf->vfs[vf_id];
1854
1855         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
1856                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
1857                 return -EINVAL;
1858         }
1859
1860         if (add) {
1861                 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
1862                 (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
1863                                 ETHER_ADDR_LEN);
1864                 (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
1865                                  ETHER_ADDR_LEN);
1866
1867                 mac_filter.filter_type = filter->filter_type;
1868                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
1869                 if (ret != I40E_SUCCESS) {
1870                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
1871                         return -1;
1872                 }
1873                 ether_addr_copy(new_mac, &pf->dev_addr);
1874         } else {
1875                 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
1876                                 ETHER_ADDR_LEN);
1877                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
1878                 if (ret != I40E_SUCCESS) {
1879                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
1880                         return -1;
1881                 }
1882
1883                 /* Clear device address as it has been removed */
1884                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
1885                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
1886         }
1887
1888         return 0;
1889 }
1890
1891 /* MAC filter handle */
1892 static int
1893 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
1894                 void *arg)
1895 {
1896         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1897         struct rte_eth_mac_filter *filter;
1898         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1899         int ret = I40E_NOT_SUPPORTED;
1900
1901         filter = (struct rte_eth_mac_filter *)(arg);
1902
1903         switch (filter_op) {
1904         case RTE_ETH_FILTER_NOP:
1905                 ret = I40E_SUCCESS;
1906                 break;
1907         case RTE_ETH_FILTER_ADD:
1908                 i40e_pf_disable_irq0(hw);
1909                 if (filter->is_vf)
1910                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
1911                 i40e_pf_enable_irq0(hw);
1912                 break;
1913         case RTE_ETH_FILTER_DELETE:
1914                 i40e_pf_disable_irq0(hw);
1915                 if (filter->is_vf)
1916                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
1917                 i40e_pf_enable_irq0(hw);
1918                 break;
1919         default:
1920                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
1921                 ret = I40E_ERR_PARAM;
1922                 break;
1923         }
1924
1925         return ret;
1926 }
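
/*
 * Usage sketch (illustrative only): this handler is dispatched from the
 * generic filter API with the RTE_ETH_FILTER_MACVLAN filter type, e.g.
 * to add a perfect-match MAC filter for VF 0:
 *
 *     struct rte_eth_mac_filter filter = {
 *             .is_vf = 1,
 *             .dst_id = 0,  // VF id
 *             .filter_type = RTE_MACVLAN_PERFECT_MATCH,
 *     };
 *     // fill filter.mac_addr, then:
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_MACVLAN,
 *                             RTE_ETH_FILTER_ADD, &filter);
 */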
1927
1928 static int
1929 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
1930                          struct rte_eth_rss_reta_entry64 *reta_conf,
1931                          uint16_t reta_size)
1932 {
1933         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1934         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1935         uint32_t lut, l;
1936         uint16_t i, j, lut_size = pf->hash_lut_size;
1937         uint16_t idx, shift;
1938         uint8_t mask;
1939
1940         if (reta_size != lut_size ||
1941                 reta_size > ETH_RSS_RETA_SIZE_512) {
1942                 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
1943                         "(%d) doesn't match the number the hardware can "
1944                         "support (%d)\n", reta_size, lut_size);
1945                 return -EINVAL;
1946         }
1947
1948         for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
1949                 idx = i / RTE_RETA_GROUP_SIZE;
1950                 shift = i % RTE_RETA_GROUP_SIZE;
1951                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1952                                                 I40E_4_BIT_MASK);
1953                 if (!mask)
1954                         continue;
1955                 if (mask == I40E_4_BIT_MASK)
1956                         l = 0;
1957                 else
1958                         l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1959                 for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
1960                         if (mask & (0x1 << j))
1961                                 lut |= reta_conf[idx].reta[shift + j] <<
1962                                                         (CHAR_BIT * j);
1963                         else
1964                                 lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
1965                 }
1966                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
1967         }
1968
1969         return 0;
1970 }
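
/*
 * Worked example of the packing above: each I40E_PFQF_HLUT register
 * holds four 8-bit LUT entries, so entry i lives in register i >> 2 at
 * bit offset CHAR_BIT * (i & 3). With reta[0..3] = {5, 1, 7, 0} and all
 * four mask bits set, the loop builds
 *
 *     lut = 5 | (1 << 8) | (7 << 16) | (0 << 24) = 0x00070105
 *
 * and writes it to I40E_PFQF_HLUT(0). When only some of the four mask
 * bits are set, the remaining bytes are re-read from hardware and
 * preserved.
 */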
1971
1972 static int
1973 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
1974                         struct rte_eth_rss_reta_entry64 *reta_conf,
1975                         uint16_t reta_size)
1976 {
1977         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1978         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1979         uint32_t lut;
1980         uint16_t i, j, lut_size = pf->hash_lut_size;
1981         uint16_t idx, shift;
1982         uint8_t mask;
1983
1984         if (reta_size != lut_size ||
1985                 reta_size > ETH_RSS_RETA_SIZE_512) {
1986                 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
1987                         "(%d) doesn't match the number the hardware can "
1988                         "support (%d)\n", reta_size, lut_size);
1989                 return -EINVAL;
1990         }
1991
1992         for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
1993                 idx = i / RTE_RETA_GROUP_SIZE;
1994                 shift = i % RTE_RETA_GROUP_SIZE;
1995                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1996                                                 I40E_4_BIT_MASK);
1997                 if (!mask)
1998                         continue;
1999
2000                 lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
2001                 for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
2002                         if (mask & (0x1 << j))
2003                                 reta_conf[idx].reta[shift + j] = ((lut >>
2004                                         (CHAR_BIT * j)) & I40E_8_BIT_MASK);
2005                 }
2006         }
2007
2008         return 0;
2009 }
2010
2011 /**
2012  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
2013  * @hw:   pointer to the HW structure
2014  * @mem:  pointer to mem struct to fill out
2015  * @size: size of memory requested
2016  * @alignment: what to align the allocation to
2017  **/
2018 enum i40e_status_code
2019 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2020                         struct i40e_dma_mem *mem,
2021                         u64 size,
2022                         u32 alignment)
2023 {
2024         static uint64_t id = 0;
2025         const struct rte_memzone *mz = NULL;
2026         char z_name[RTE_MEMZONE_NAMESIZE];
2027
2028         if (!mem)
2029                 return I40E_ERR_PARAM;
2030
2031         id++;
2032         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
2033 #ifdef RTE_LIBRTE_XEN_DOM0
2034         mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
2035                                                         RTE_PGSIZE_2M);
2036 #else
2037         mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
2038 #endif
2039         if (!mz)
2040                 return I40E_ERR_NO_MEMORY;
2041
2042         mem->id = id;
2043         mem->size = size;
2044         mem->va = mz->addr;
2045 #ifdef RTE_LIBRTE_XEN_DOM0
2046         mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
2047 #else
2048         mem->pa = mz->phys_addr;
2049 #endif
2050
2051         return I40E_SUCCESS;
2052 }
2053
2054 /**
2055  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
2056  * @hw:   pointer to the HW structure
2057  * @mem:  ptr to mem struct to free
2058  **/
2059 enum i40e_status_code
2060 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2061                     struct i40e_dma_mem *mem)
2062 {
2063         if (!mem || !mem->va)
2064                 return I40E_ERR_PARAM;
2065
2066         mem->va = NULL;
2067         mem->pa = (u64)0;
2068
2069         return I40E_SUCCESS;
2070 }
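
/*
 * Usage sketch (illustrative only): the shared base driver reaches these
 * OS-dependent shims through wrapper macros in the osdep layer; called
 * directly they look like this. Note that the memzone itself is not
 * released by the free routine, since memzones cannot be freed in this
 * DPDK release.
 *
 *     struct i40e_dma_mem mem;
 *
 *     if (i40e_allocate_dma_mem_d(hw, &mem, 4096, 4096) == I40E_SUCCESS) {
 *             // mem.va is the virtual address, mem.pa the physical one
 *             i40e_free_dma_mem_d(hw, &mem);
 *     }
 */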
2071
2072 /**
2073  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
2074  * @hw:   pointer to the HW structure
2075  * @mem:  pointer to mem struct to fill out
2076  * @size: size of memory requested
2077  **/
2078 enum i40e_status_code
2079 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2080                          struct i40e_virt_mem *mem,
2081                          u32 size)
2082 {
2083         if (!mem)
2084                 return I40E_ERR_PARAM;
2085
2086         mem->size = size;
2087         mem->va = rte_zmalloc("i40e", size, 0);
2088
2089         if (mem->va)
2090                 return I40E_SUCCESS;
2091         else
2092                 return I40E_ERR_NO_MEMORY;
2093 }
2094
2095 /**
2096  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
2097  * @hw:   pointer to the HW structure
2098  * @mem:  pointer to mem struct to free
2099  **/
2100 enum i40e_status_code
2101 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2102                      struct i40e_virt_mem *mem)
2103 {
2104         if (!mem)
2105                 return I40E_ERR_PARAM;
2106
2107         rte_free(mem->va);
2108         mem->va = NULL;
2109
2110         return I40E_SUCCESS;
2111 }
2112
2113 void
2114 i40e_init_spinlock_d(struct i40e_spinlock *sp)
2115 {
2116         rte_spinlock_init(&sp->spinlock);
2117 }
2118
2119 void
2120 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
2121 {
2122         rte_spinlock_lock(&sp->spinlock);
2123 }
2124
2125 void
2126 i40e_release_spinlock_d(struct i40e_spinlock *sp)
2127 {
2128         rte_spinlock_unlock(&sp->spinlock);
2129 }
2130
2131 void
2132 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
2133 {
2134         return;
2135 }
2136
2137 /**
2138  * Get the hardware capabilities, which will be parsed
2139  * and saved into struct i40e_hw.
2140  */
2141 static int
2142 i40e_get_cap(struct i40e_hw *hw)
2143 {
2144         struct i40e_aqc_list_capabilities_element_resp *buf;
2145         uint16_t len, size = 0;
2146         int ret;
2147
2148         /* Calculate a buffer size large enough to hold the response data */
2149         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
2150                                                 I40E_MAX_CAP_ELE_NUM;
2151         buf = rte_zmalloc("i40e", len, 0);
2152         if (!buf) {
2153                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2154                 return I40E_ERR_NO_MEMORY;
2155         }
2156
2157         /* Get and parse the capabilities, then save them to hw */
2158         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
2159                         i40e_aqc_opc_list_func_capabilities, NULL);
2160         if (ret != I40E_SUCCESS)
2161                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
2162
2163         /* Free the temporary buffer after being used */
2164         rte_free(buf);
2165
2166         return ret;
2167 }
2168
2169 static int
2170 i40e_pf_parameter_init(struct rte_eth_dev *dev)
2171 {
2172         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2173         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2174         uint16_t sum_queues = 0, sum_vsis, left_queues;
2175
2176         /* First check whether the firmware supports SR-IOV */
2177         if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
2178                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SR-IOV");
2179                 return -EINVAL;
2180         }
2181
2182         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
2183         pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
2184         PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
2185         /* Allocate queues for pf */
2186         if (hw->func_caps.rss) {
2187                 pf->flags |= I40E_FLAG_RSS;
2188                 pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
2189                         (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
2190                 pf->lan_nb_qps = i40e_align_floor(pf->lan_nb_qps);
2191         } else
2192                 pf->lan_nb_qps = 1;
2193         sum_queues = pf->lan_nb_qps;
2194         /* The default VSI is not counted */
2195         sum_vsis = 0;
2196         PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
2197
2198         if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
2199                 pf->flags |= I40E_FLAG_SRIOV;
2200                 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
2201                 if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
2202                         PMD_INIT_LOG(ERR, "Config VF number %u, "
2203                                      "max supported %u.",
2204                                      dev->pci_dev->max_vfs,
2205                                      hw->func_caps.num_vfs);
2206                         return -EINVAL;
2207                 }
2208                 if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
2209                         PMD_INIT_LOG(ERR, "Queue pairs per VF %u, "
2210                                      "max supported %u.",
2211                                      pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
2212                         return -EINVAL;
2213                 }
2214                 pf->vf_num = dev->pci_dev->max_vfs;
2215                 sum_queues += pf->vf_nb_qps * pf->vf_num;
2216                 sum_vsis   += pf->vf_num;
2217                 PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
2218                              pf->vf_num, pf->vf_nb_qps);
2219         } else
2220                 pf->vf_num = 0;
2221
2222         if (hw->func_caps.vmdq) {
2223                 pf->flags |= I40E_FLAG_VMDQ;
2224                 pf->vmdq_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2225                 pf->max_nb_vmdq_vsi = 1;
2226                 /*
2227                  * If VMDQ is available, assume that at least one VSI can be
2228                  * created; the count will be adjusted later.
2229                  */
2230                 sum_queues += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
2231                 sum_vsis += pf->max_nb_vmdq_vsi;
2232         } else {
2233                 pf->vmdq_nb_qps = 0;
2234                 pf->max_nb_vmdq_vsi = 0;
2235         }
2236         pf->nb_cfg_vmdq_vsi = 0;
2237
2238         if (hw->func_caps.fd) {
2239                 pf->flags |= I40E_FLAG_FDIR;
2240                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
2241                 /**
2242                  * Each flow director consumes one VSI and one queue,
2243                  * but the exact number can't be predicted here.
2244                  */
2245         }
2246
2247         if (sum_vsis > pf->max_num_vsi ||
2248                 sum_queues > hw->func_caps.num_rx_qp) {
2249                 PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
2250                 PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
2251                              pf->max_num_vsi, sum_vsis);
2252                 PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
2253                              hw->func_caps.num_rx_qp, sum_queues);
2254                 return -EINVAL;
2255         }
2256
2257         /* Adjust VMDQ setting to support as many VMs as possible */
2258         if (pf->flags & I40E_FLAG_VMDQ) {
2259                 left_queues = hw->func_caps.num_rx_qp - sum_queues;
2260
2261                 pf->max_nb_vmdq_vsi += RTE_MIN(left_queues / pf->vmdq_nb_qps,
2262                                         pf->max_num_vsi - sum_vsis);
2263
2264                 /* Limit the max number of VMDQ pools to what rte_ether can support */
2265                 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
2266                                         ETH_64_POOLS - 1);
2267
2268                 PMD_INIT_LOG(INFO, "Max VMDQ VSI num:%u",
2269                                 pf->max_nb_vmdq_vsi);
2270                 PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
2271         }
2272
2273         /* Each VSI occupies at least one MSI-X interrupt; IRQ0 is reserved
2274          * for miscellaneous interrupt causes */
2275         if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
2276                 PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
2277                              sum_vsis, hw->func_caps.num_msix_vectors);
2278                 return -EINVAL;
2279         }
2280         return I40E_SUCCESS;
2281 }
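
/*
 * Worked example of the budgeting above (hypothetical capability
 * values): with num_rx_qp = 128, lan_nb_qps = 64, 4 VFs at 4 queue
 * pairs each and VMDQ at 4 queue pairs per pool, the initial budget is
 *
 *     sum_queues = 64 + 4 * 4 + 4 * 1 = 84, sum_vsis = 4 + 1 = 5
 *
 * leaving 128 - 84 = 44 queue pairs, so max_nb_vmdq_vsi grows from 1 to
 * 1 + 44 / 4 = 12 (subject to the max_num_vsi and ETH_64_POOLS limits).
 */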
2282
2283 static int
2284 i40e_pf_get_switch_config(struct i40e_pf *pf)
2285 {
2286         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2287         struct i40e_aqc_get_switch_config_resp *switch_config;
2288         struct i40e_aqc_switch_config_element_resp *element;
2289         uint16_t start_seid = 0, num_reported;
2290         int ret;
2291
2292         switch_config = (struct i40e_aqc_get_switch_config_resp *)
2293                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
2294         if (!switch_config) {
2295                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2296                 return -ENOMEM;
2297         }
2298
2299         /* Get the switch configurations */
2300         ret = i40e_aq_get_switch_config(hw, switch_config,
2301                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
2302         if (ret != I40E_SUCCESS) {
2303                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
2304                 goto fail;
2305         }
2306         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
2307         if (num_reported != 1) { /* Exactly one element is expected */
2308                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
2309                 ret = -EINVAL;
2310                 goto fail;
2310         }
2311
2312         /* Parse the switch configuration elements */
2313         element = &(switch_config->element[0]);
2314         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
2315                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
2316                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
2317         } else
2318                 PMD_DRV_LOG(INFO, "Unknown element type");
2319
2320 fail:
2321         rte_free(switch_config);
2322
2323         return ret;
2324 }
2325
2326 static int
2327 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
2328                         uint32_t num)
2329 {
2330         struct pool_entry *entry;
2331
2332         if (pool == NULL || num == 0)
2333                 return -EINVAL;
2334
2335         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
2336         if (entry == NULL) {
2337                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
2338                 return -ENOMEM;
2339         }
2340
2341         /* Initialize the queue heap */
2342         pool->num_free = num;
2343         pool->num_alloc = 0;
2344         pool->base = base;
2345         LIST_INIT(&pool->alloc_list);
2346         LIST_INIT(&pool->free_list);
2347
2348         /* Initialize the first free element */
2349         entry->base = 0;
2350         entry->len = num;
2351
2352         LIST_INSERT_HEAD(&pool->free_list, entry, next);
2353         return 0;
2354 }
2355
2356 static void
2357 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
2358 {
2359         struct pool_entry *entry;
2360
2361         if (pool == NULL)
2362                 return;
2363
2364         LIST_FOREACH(entry, &pool->alloc_list, next) {
2365                 LIST_REMOVE(entry, next);
2366                 rte_free(entry);
2367         }
2368
2369         LIST_FOREACH(entry, &pool->free_list, next) {
2370                 LIST_REMOVE(entry, next);
2371                 rte_free(entry);
2372         }
2373
2374         pool->num_free = 0;
2375         pool->num_alloc = 0;
2376         pool->base = 0;
2377         LIST_INIT(&pool->alloc_list);
2378         LIST_INIT(&pool->free_list);
2379 }
2380
2381 static int
2382 i40e_res_pool_free(struct i40e_res_pool_info *pool,
2383                        uint32_t base)
2384 {
2385         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
2386         uint32_t pool_offset;
2387         int insert;
2388
2389         if (pool == NULL) {
2390                 PMD_DRV_LOG(ERR, "Invalid parameter");
2391                 return -EINVAL;
2392         }
2393
2394         pool_offset = base - pool->base;
2395         /* Lookup in alloc list */
2396         LIST_FOREACH(entry, &pool->alloc_list, next) {
2397                 if (entry->base == pool_offset) {
2398                         valid_entry = entry;
2399                         LIST_REMOVE(entry, next);
2400                         break;
2401                 }
2402         }
2403
2404         /* Not found, return */
2405         if (valid_entry == NULL) {
2406                 PMD_DRV_LOG(ERR, "Failed to find entry");
2407                 return -EINVAL;
2408         }
2409
2410         /**
2411          * Found it; move it to the free list and try to merge.
2412          * To make merging easier, the free list is always kept
2413          * sorted by base. Find the adjacent previous and next entries.
2414          */
2415         prev = next = NULL;
2416         LIST_FOREACH(entry, &pool->free_list, next) {
2417                 if (entry->base > valid_entry->base) {
2418                         next = entry;
2419                         break;
2420                 }
2421                 prev = entry;
2422         }
2423
2424         insert = 0;
2425         /* Try to merge with the next entry */
2426         if (next != NULL) {
2427                 /* Contiguous with the next entry, merge them */
2428                 if (valid_entry->base + valid_entry->len == next->base) {
2429                         next->base = valid_entry->base;
2430                         next->len += valid_entry->len;
2431                         rte_free(valid_entry);
2432                         valid_entry = next;
2433                         insert = 1;
2434                 }
2435         }
2436
2437         if (prev != NULL) {
2438                 /* Merge with previous one */
2439                 if (prev->base + prev->len == valid_entry->base) {
2440                         prev->len += valid_entry->len;
2441                         /* Already merged with the next entry; remove that node */
2442                         if (insert == 1) {
2443                                 LIST_REMOVE(valid_entry, next);
2444                                 rte_free(valid_entry);
2445                         } else {
2446                                 rte_free(valid_entry);
2447                                 insert = 1;
2448                         }
2449                 }
2450         }
2451
2452         /* No adjacent entry to merge with, insert as a new node */
2453         if (insert == 0) {
2454                 if (prev != NULL)
2455                         LIST_INSERT_AFTER(prev, valid_entry, next);
2456                 else if (next != NULL)
2457                         LIST_INSERT_BEFORE(next, valid_entry, next);
2458                 else /* It's empty list, insert to head */
2459                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
2460         }
2461
2462         pool->num_free += valid_entry->len;
2463         pool->num_alloc -= valid_entry->len;
2464
2465         return 0;
2466 }
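
/*
 * Worked example of the merge logic above: with free_list holding
 * {base 0, len 8} and {base 12, len 4}, freeing the allocated block
 * {base 8, len 4} first merges forward into {base 8, len 8} and then
 * backward into a single {base 0, len 16} entry, keeping the free list
 * sorted and fully coalesced.
 */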
2467
2468 static int
2469 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
2470                        uint16_t num)
2471 {
2472         struct pool_entry *entry, *valid_entry;
2473
2474         if (pool == NULL || num == 0) {
2475                 PMD_DRV_LOG(ERR, "Invalid parameter");
2476                 return -EINVAL;
2477         }
2478
2479         if (pool->num_free < num) {
2480                 PMD_DRV_LOG(ERR, "Not enough resources: asked:%u, available:%u",
2481                             num, pool->num_free);
2482                 return -ENOMEM;
2483         }
2484
2485         valid_entry = NULL;
2486         /* Look through the free list and find the best-fit entry */
2487         LIST_FOREACH(entry, &pool->free_list, next) {
2488                 if (entry->len >= num) {
2489                         /* Exact fit, take it */
2490                         if (entry->len == num) {
2491                                 valid_entry = entry;
2492                                 break;
2493                         }
2494                         if (valid_entry == NULL || valid_entry->len > entry->len)
2495                                 valid_entry = entry;
2496                 }
2497         }
2498
2499         /* No entry satisfies the request, return */
2500         if (valid_entry == NULL) {
2501                 PMD_DRV_LOG(ERR, "No valid entry found");
2502                 return -ENOMEM;
2503         }
2504         /**
2505          * The entry has exactly the requested number of queues;
2506          * remove it from the free list.
2507          */
2508         if (valid_entry->len == num) {
2509                 LIST_REMOVE(valid_entry, next);
2510         } else {
2511                 /**
2512                  * The entry has more queues than requested; create a new
2513                  * entry for the alloc list and deduct the allocated base
2514                  * and length from the free-list entry.
2515                  */
2516                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
2517                 if (entry == NULL) {
2518                         PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2519                                     "resource pool");
2520                         return -ENOMEM;
2521                 }
2522                 entry->base = valid_entry->base;
2523                 entry->len = num;
2524                 valid_entry->base += num;
2525                 valid_entry->len -= num;
2526                 valid_entry = entry;
2527         }
2528
2529         /* Insert it into alloc list, not sorted */
2530         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
2531
2532         pool->num_free -= valid_entry->len;
2533         pool->num_alloc += valid_entry->len;
2534
2535         return (valid_entry->base + pool->base);
2536 }
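
/*
 * Worked example of the best-fit search above: with free_list holding
 * {base 0, len 8} and {base 8, len 4}, a request for 4 queues takes the
 * exact-fit {base 8, len 4} entry, while a request for 2 splits the
 * smaller candidate into an allocated {base 8, len 2} and a remaining
 * free {base 10, len 2}. The value returned is pool->base plus the
 * entry's base.
 */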
2537
2538 /**
2539  * bitmap_is_subset - Check whether src2 is subset of src1
2540  **/
2541 static inline int
2542 bitmap_is_subset(uint8_t src1, uint8_t src2)
2543 {
2544         return !((src1 ^ src2) & src2);
2545 }
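
/*
 * Example: with src1 = 0x7 (TC0-TC2 supported), src2 = 0x5 yields
 * (0x7 ^ 0x5) & 0x5 = 0, i.e. src2 is a subset; src2 = 0x9 yields
 * (0x7 ^ 0x9) & 0x9 = 0x8, i.e. not a subset.
 */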
2546
2547 static int
2548 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2549 {
2550         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2551
2552         /* If DCB is not supported, only default TC is supported */
2553         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2554                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
2555                 return -EINVAL;
2556         }
2557
2558         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2559                 PMD_DRV_LOG(ERR, "Requested TC map 0x%x is not a subset of "
2560                             "the HW supported map 0x%x", enabled_tcmap,
2561                             hw->func_caps.enabled_tcmap);
2562                 return -EINVAL;
2563         }
2564         return I40E_SUCCESS;
2565 }
2566
2567 int
2568 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2569                                 struct i40e_vsi_vlan_pvid_info *info)
2570 {
2571         struct i40e_hw *hw;
2572         struct i40e_vsi_context ctxt;
2573         uint8_t vlan_flags = 0;
2574         int ret;
2575
2576         if (vsi == NULL || info == NULL) {
2577                 PMD_DRV_LOG(ERR, "invalid parameters");
2578                 return I40E_ERR_PARAM;
2579         }
2580
2581         if (info->on) {
2582                 vsi->info.pvid = info->config.pvid;
2583                 /**
2584                  * If PVID insertion is enabled, only tagged packets are
2585                  * allowed to be sent out.
2586                  */
2587                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2588                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2589         } else {
2590                 vsi->info.pvid = 0;
2591                 if (info->config.reject.tagged == 0)
2592                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2593
2594                 if (info->config.reject.untagged == 0)
2595                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2596         }
2597         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2598                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
2599         vsi->info.port_vlan_flags |= vlan_flags;
2600         vsi->info.valid_sections =
2601                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2602         memset(&ctxt, 0, sizeof(ctxt));
2603         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2604         ctxt.seid = vsi->seid;
2605
2606         hw = I40E_VSI_TO_HW(vsi);
2607         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2608         if (ret != I40E_SUCCESS)
2609                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2610
2611         return ret;
2612 }
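
/**
 * Minimal usage sketch (illustrative only, not part of the driver):
 * enabling insertion of a hypothetical PVID 100 on a VSI obtained from a
 * prior i40e_vsi_setup() call. The function and struct fields are the
 * ones used above; the helper name and PVID value are made up.
 */
#if 0
static int
example_enable_pvid(struct i40e_vsi *vsi)
{
        struct i40e_vsi_vlan_pvid_info info;

        memset(&info, 0, sizeof(info));
        info.on = 1;            /* insert the PVID on transmit */
        info.config.pvid = 100; /* hypothetical port VLAN id */

        return i40e_vsi_vlan_pvid_set(vsi, &info);
}
#endif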
2613
2614 static int
2615 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2616 {
2617         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2618         int i, ret;
2619         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2620
2621         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2622         if (ret != I40E_SUCCESS)
2623                 return ret;
2624
2625         if (!vsi->seid) {
2626                 PMD_DRV_LOG(ERR, "seid not valid");
2627                 return -EINVAL;
2628         }
2629
2630         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2631         tc_bw_data.tc_valid_bits = enabled_tcmap;
2632         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2633                 tc_bw_data.tc_bw_credits[i] =
2634                         (enabled_tcmap & (1 << i)) ? 1 : 0;
2635
2636         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2637         if (ret != I40E_SUCCESS) {
2638                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
2639                 return ret;
2640         }
2641
2642         (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2643                                         sizeof(vsi->info.qs_handle));
2644         return I40E_SUCCESS;
2645 }
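
/**
 * Worked example (illustrative only): with enabled_tcmap = 0x3,
 * tc_valid_bits becomes 0x3 and tc_bw_credits is {1, 1, 0, ...}, i.e. the
 * two enabled TCs are given equal relative bandwidth credits.
 */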
2646
2647 static int
2648 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2649                                  struct i40e_aqc_vsi_properties_data *info,
2650                                  uint8_t enabled_tcmap)
2651 {
2652         int ret, total_tc = 0, i;
2653         uint16_t qpnum_per_tc, bsf, qp_idx;
2654
2655         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2656         if (ret != I40E_SUCCESS)
2657                 return ret;
2658
2659         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2660                 if (enabled_tcmap & (1 << i))
2661                         total_tc++;
2662         vsi->enabled_tc = enabled_tcmap;
2663
2664         /* Number of queues per enabled TC */
2665         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
2666         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2667         bsf = rte_bsf32(qpnum_per_tc);
2668
2669         /* Adjust the queue number to actual queues that can be applied */
2670         vsi->nb_qps = qpnum_per_tc * total_tc;
2671
2672         /**
2673          * Configure TC and queue mapping parameters, for enabled TC,
2674          * allocate qpnum_per_tc queues to this traffic. For disabled TC,
2675          * default queue will serve it.
2676          */
2677         qp_idx = 0;
2678         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2679                 if (vsi->enabled_tc & (1 << i)) {
2680                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2681                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2682                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2683                         qp_idx += qpnum_per_tc;
2684                 } else
2685                         info->tc_mapping[i] = 0;
2686         }
2687
2688         /* Associate queue number with VSI */
2689         if (vsi->type == I40E_VSI_SRIOV) {
2690                 info->mapping_flags |=
2691                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2692                 for (i = 0; i < vsi->nb_qps; i++)
2693                         info->queue_mapping[i] =
2694                                 rte_cpu_to_le_16(vsi->base_queue + i);
2695         } else {
2696                 info->mapping_flags |=
2697                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2698                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2699         }
2700         info->valid_sections |=
2701                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2702
2703         return I40E_SUCCESS;
2704 }
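
/**
 * Worked example (illustrative only): with vsi->nb_qps = 16 and
 * enabled_tcmap = 0x3 (two TCs), qpnum_per_tc = i40e_align_floor(16 / 2) = 8
 * (already a power of two) and bsf = rte_bsf32(8) = 3, so TC0 maps queues
 * 0-7 and TC1 maps queues 8-15, each tc_mapping entry encoding its queue
 * offset and the power-of-two queue count (1 << 3).
 */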
2705
2706 static int
2707 i40e_veb_release(struct i40e_veb *veb)
2708 {
2709         struct i40e_vsi *vsi;
2710         struct i40e_hw *hw;
2711
2712         if (veb == NULL || veb->associate_vsi == NULL)
2713                 return -EINVAL;
2714
2715         if (!TAILQ_EMPTY(&veb->head)) {
2716                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
2717                 return -EACCES;
2718         }
2719
2720         vsi = veb->associate_vsi;
2721         hw = I40E_VSI_TO_HW(vsi);
2722
2723         vsi->uplink_seid = veb->uplink_seid;
2724         i40e_aq_delete_element(hw, veb->seid, NULL);
2725         rte_free(veb);
2726         vsi->veb = NULL;
2727         return I40E_SUCCESS;
2728 }
2729
2730 /* Setup a veb */
2731 static struct i40e_veb *
2732 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2733 {
2734         struct i40e_veb *veb;
2735         int ret;
2736         struct i40e_hw *hw;
2737
2738         if (pf == NULL || vsi == NULL) {
2739                 PMD_DRV_LOG(ERR, "veb setup failed, "
2740                             "associated VSI shouldn't be NULL");
2741                 return NULL;
2742         }
2743         hw = I40E_PF_TO_HW(pf);
2744
2745         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2746         if (!veb) {
2747                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
2748                 goto fail;
2749         }
2750
2751         veb->associate_vsi = vsi;
2752         TAILQ_INIT(&veb->head);
2753         veb->uplink_seid = vsi->uplink_seid;
2754
2755         ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2756                 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2757
2758         if (ret != I40E_SUCCESS) {
2759                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
2760                             hw->aq.asq_last_status);
2761                 goto fail;
2762         }
2763
2764         /* get statistics index */
2765         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2766                                 &veb->stats_idx, NULL, NULL, NULL);
2767         if (ret != I40E_SUCCESS) {
2768                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
2769                             hw->aq.asq_last_status);
2770                 goto fail;
2771         }
2772
2773         /* Get VEB bandwidth, to be implemented */
2774         /* Now associated vsi binding to the VEB, set uplink to this VEB */
2775         vsi->uplink_seid = veb->seid;
2776
2777         return veb;
2778 fail:
2779         rte_free(veb);
2780         return NULL;
2781 }
2782
2783 int
2784 i40e_vsi_release(struct i40e_vsi *vsi)
2785 {
2786         struct i40e_pf *pf;
2787         struct i40e_hw *hw;
2788         struct i40e_vsi_list *vsi_list;
2789         int ret;
2790         struct i40e_mac_filter *f;
2791
2792         if (!vsi)
2793                 return I40E_SUCCESS;
2794
2795         pf = I40E_VSI_TO_PF(vsi);
2796         hw = I40E_VSI_TO_HW(vsi);
2797
2798         /* If the VSI has child VSIs attached, release them first */
2799         if (vsi->veb) {
2800                 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2801                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2802                                 return -1;
2803                         TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2804                 }
2805                 i40e_veb_release(vsi->veb);
2806         }
2807
2808         /* Remove all macvlan filters of the VSI */
2809         i40e_vsi_remove_all_macvlan_filter(vsi);
2810         TAILQ_FOREACH(f, &vsi->mac_list, next)
2811                 rte_free(f);
2812
2813         if (vsi->type != I40E_VSI_MAIN) {
2814                 /* Remove vsi from parent's sibling list */
2815                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2816                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
2817                         return I40E_ERR_PARAM;
2818                 }
2819                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2820                                 &vsi->sib_vsi_list, list);
2821
2822                 /* Remove the switch element of the VSI */
2823                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2824                 if (ret != I40E_SUCCESS)
2825                         PMD_DRV_LOG(ERR, "Failed to delete element");
2826         }
2827         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2828
2829         if (vsi->type != I40E_VSI_SRIOV)
2830                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2831         rte_free(vsi);
2832
2833         return I40E_SUCCESS;
2834 }
2835
2836 static int
2837 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2838 {
2839         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2840         struct i40e_aqc_remove_macvlan_element_data def_filter;
2841         struct i40e_mac_filter_info filter;
2842         int ret;
2843
2844         if (vsi->type != I40E_VSI_MAIN)
2845                 return I40E_ERR_CONFIG;
2846         memset(&def_filter, 0, sizeof(def_filter));
2847         (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2848                                         ETH_ADDR_LEN);
2849         def_filter.vlan_tag = 0;
2850         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2851                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2852         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2853         if (ret != I40E_SUCCESS) {
2854                 struct i40e_mac_filter *f;
2855                 struct ether_addr *mac;
2856
2857                 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2858                             "macvlan filter");
2859                 /* The permanent MAC needs to be added to the MAC list */
2860                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2861                 if (f == NULL) {
2862                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2863                         return I40E_ERR_NO_MEMORY;
2864                 }
2865                 mac = &f->mac_info.mac_addr;
2866                 (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
2867                                 ETH_ADDR_LEN);
2868                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2869                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2870                 vsi->mac_num++;
2871
2872                 return ret;
2873         }
2874         (void)rte_memcpy(&filter.mac_addr,
2875                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
2876         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2877         return i40e_vsi_add_mac(vsi, &filter);
2878 }
2879
2880 static int
2881 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2882 {
2883         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2884         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2885         struct i40e_hw *hw = &vsi->adapter->hw;
2886         i40e_status ret;
2887         int i;
2888
2889         memset(&bw_config, 0, sizeof(bw_config));
2890         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2891         if (ret != I40E_SUCCESS) {
2892                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
2893                             hw->aq.asq_last_status);
2894                 return ret;
2895         }
2896
2897         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2898         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2899                                         &ets_sla_config, NULL);
2900         if (ret != I40E_SUCCESS) {
2901                 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwidth "
2902                             "configuration %u", hw->aq.asq_last_status);
2903                 return ret;
2904         }
2905
2906         /* Don't store the info yet, just print it out */
2907         PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
2908         PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
2909         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2910                 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
2911                             ets_sla_config.share_credits[i]);
2912                 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
2913                             rte_le_to_cpu_16(ets_sla_config.credits[i]));
2914                 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
2915                             rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2916                             (i * 4));
2917         }
2918
2919         return 0;
2920 }
2921
2922 /* Setup a VSI */
2923 struct i40e_vsi *
2924 i40e_vsi_setup(struct i40e_pf *pf,
2925                enum i40e_vsi_type type,
2926                struct i40e_vsi *uplink_vsi,
2927                uint16_t user_param)
2928 {
2929         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2930         struct i40e_vsi *vsi;
2931         struct i40e_mac_filter_info filter;
2932         int ret;
2933         struct i40e_vsi_context ctxt;
2934         struct ether_addr broadcast =
2935                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2936
2937         if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2938                 PMD_DRV_LOG(ERR, "VSI setup failed, "
2939                             "uplink VSI shouldn't be NULL");
2940                 return NULL;
2941         }
2942
2943         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2944                 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI's "
2945                             "uplink VSI should be NULL");
2946                 return NULL;
2947         }
2948
2949         /* If the uplink vsi didn't set up a VEB, create one first */
2950         if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2951                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2952
2953                 if (uplink_vsi->veb == NULL) {
2954                         PMD_DRV_LOG(ERR, "VEB setup failed");
2955                         return NULL;
2956                 }
2957         }
2958
2959         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2960         if (!vsi) {
2961                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
2962                 return NULL;
2963         }
2964         TAILQ_INIT(&vsi->mac_list);
2965         vsi->type = type;
2966         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2967         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2968         vsi->parent_vsi = uplink_vsi;
2969         vsi->user_param = user_param;
2970         /* Allocate queues */
2971         switch (vsi->type) {
2972         case I40E_VSI_MAIN:
2973                 vsi->nb_qps = pf->lan_nb_qps;
2974                 break;
2975         case I40E_VSI_SRIOV:
2976                 vsi->nb_qps = pf->vf_nb_qps;
2977                 break;
2978         case I40E_VSI_VMDQ2:
2979                 vsi->nb_qps = pf->vmdq_nb_qps;
2980                 break;
2981         case I40E_VSI_FDIR:
2982                 vsi->nb_qps = pf->fdir_nb_qps;
2983                 break;
2984         default:
2985                 goto fail_mem;
2986         }
2987         /*
2988          * The filter status descriptor is reported on rx queue 0,
2989          * while the tx queue for fdir filter programming has no
2990          * such constraint and can be any queue.
2991          * To keep it simple, the FDIR vsi uses queue pair 0.
2992          * To make sure queue pair 0 is used, the queue allocation
2993          * must be done before this function is called.
2994          */
2995         if (type != I40E_VSI_FDIR) {
2996                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2997                 if (ret < 0) {
2998                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
2999                                     vsi->seid, ret);
3000                         goto fail_mem;
3001                 }
3002                 vsi->base_queue = ret;
3003         } else
3004                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
3005
3006         /* VF has MSIX interrupt in VF range, don't allocate here */
3007         if (type != I40E_VSI_SRIOV) {
3008                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
3009                 if (ret < 0) {
3010                         PMD_DRV_LOG(ERR, "VSI %d MSIX interrupt allocation failed %d", vsi->seid, ret);
3011                         goto fail_queue_alloc;
3012                 }
3013                 vsi->msix_intr = ret;
3014         } else
3015                 vsi->msix_intr = 0;
3016         /* Add VSI */
3017         if (type == I40E_VSI_MAIN) {
3018                 /* For the main VSI, no need to add it since it's the default one */
3019                 vsi->uplink_seid = pf->mac_seid;
3020                 vsi->seid = pf->main_vsi_seid;
3021                 /* Bind queues with specific MSIX interrupt */
3022                 /**
3023                  * At least 2 interrupts are needed: one for the misc
3024                  * cause, enabled from the OS side, and another for the
3025                  * queues, bound to the interrupt from the device side only.
3026                  */
3027
3028                 /* Get default VSI parameters from hardware */
3029                 memset(&ctxt, 0, sizeof(ctxt));
3030                 ctxt.seid = vsi->seid;
3031                 ctxt.pf_num = hw->pf_id;
3032                 ctxt.uplink_seid = vsi->uplink_seid;
3033                 ctxt.vf_num = 0;
3034                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
3035                 if (ret != I40E_SUCCESS) {
3036                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
3037                         goto fail_msix_alloc;
3038                 }
3039                 (void)rte_memcpy(&vsi->info, &ctxt.info,
3040                         sizeof(struct i40e_aqc_vsi_properties_data));
3041                 vsi->vsi_id = ctxt.vsi_number;
3042                 vsi->info.valid_sections = 0;
3043
3044                 /* Configure TC, enable TC0 only */
3045                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
3046                         I40E_SUCCESS) {
3047                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
3048                         goto fail_msix_alloc;
3049                 }
3050
3051                 /* TC, queue mapping */
3052                 memset(&ctxt, 0, sizeof(ctxt));
3053                 vsi->info.valid_sections |=
3054                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3055                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3056                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3057                 (void)rte_memcpy(&ctxt.info, &vsi->info,
3058                         sizeof(struct i40e_aqc_vsi_properties_data));
3059                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3060                                                 I40E_DEFAULT_TCMAP);
3061                 if (ret != I40E_SUCCESS) {
3062                         PMD_DRV_LOG(ERR, "Failed to configure "
3063                                     "TC queue mapping");
3064                         goto fail_msix_alloc;
3065                 }
3066                 ctxt.seid = vsi->seid;
3067                 ctxt.pf_num = hw->pf_id;
3068                 ctxt.uplink_seid = vsi->uplink_seid;
3069                 ctxt.vf_num = 0;
3070
3071                 /* Update VSI parameters */
3072                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3073                 if (ret != I40E_SUCCESS) {
3074                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
3075                         goto fail_msix_alloc;
3076                 }
3077
3078                 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
3079                                                 sizeof(vsi->info.tc_mapping));
3080                 (void)rte_memcpy(&vsi->info.queue_mapping,
3081                                 &ctxt.info.queue_mapping,
3082                         sizeof(vsi->info.queue_mapping));
3083                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
3084                 vsi->info.valid_sections = 0;
3085
3086                 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
3087                                 ETH_ADDR_LEN);
3088
3089                 /**
3090                  * Updating the default filter settings is necessary to
3091                  * prevent reception of tagged packets.
3092                  * Some old firmware configurations load a default macvlan
3093                  * filter which accepts both tagged and untagged packets.
3094                  * The update replaces it with a normal filter if needed.
3095                  * For NVM 4.2.2 or later, the update is no longer needed:
3096                  * firmware with correct configurations loads the expected
3097                  * default macvlan filter, which cannot be removed.
3098                  */
3099                 i40e_update_default_filter_setting(vsi);
3100                 i40e_config_qinq(hw, vsi);
3101         } else if (type == I40E_VSI_SRIOV) {
3102                 memset(&ctxt, 0, sizeof(ctxt));
3103                 /**
3104                  * For other VSIs, the uplink_seid equals the uplink VSI's
3105                  * uplink_seid since they share the same VEB.
3106                  */
3107                 vsi->uplink_seid = uplink_vsi->uplink_seid;
3108                 ctxt.pf_num = hw->pf_id;
3109                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
3110                 ctxt.uplink_seid = vsi->uplink_seid;
3111                 ctxt.connection_type = 0x1;
3112                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
3113
3114                 /**
3115                  * Do not configure the switch ID to enable VEB switching
3116                  * via I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB, because on Fortville,
3117                  * if the source MAC address of a packet sent from a VF is
3118                  * not listed in the VEB's MAC table, the VEB switches the
3119                  * packet back to the VF. Enable it once the HW issue is
3120                  * fixed.
3121                  */
3122
3123                 /* Configure port/vlan */
3124                 ctxt.info.valid_sections |=
3125                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3126                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
3127                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3128                                                 I40E_DEFAULT_TCMAP);
3129                 if (ret != I40E_SUCCESS) {
3130                         PMD_DRV_LOG(ERR, "Failed to configure "
3131                                     "TC queue mapping");
3132                         goto fail_msix_alloc;
3133                 }
3134                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3135                 ctxt.info.valid_sections |=
3136                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3137                 /**
3138                  * Since the VSI is not created yet, only its parameters
3139                  * are configured here; the VSI is added below.
3140                  */
3141
3142                 i40e_config_qinq(hw, vsi);
3143         } else if (type == I40E_VSI_VMDQ2) {
3144                 memset(&ctxt, 0, sizeof(ctxt));
3145                 /*
3146                  * For other VSIs, the uplink_seid equals the uplink VSI's
3147                  * uplink_seid since they share the same VEB.
3148                  */
3149                 vsi->uplink_seid = uplink_vsi->uplink_seid;
3150                 ctxt.pf_num = hw->pf_id;
3151                 ctxt.vf_num = 0;
3152                 ctxt.uplink_seid = vsi->uplink_seid;
3153                 ctxt.connection_type = 0x1;
3154                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
3155
3156                 ctxt.info.valid_sections |=
3157                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
3158                 /* user_param carries the flag to enable loopback */
3159                 if (user_param) {
3160                         ctxt.info.switch_id =
3161                                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
3162                         ctxt.info.switch_id |=
3163                                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
3164                 }
3165
3166                 /* Configure port/vlan */
3167                 ctxt.info.valid_sections |=
3168                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3169                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
3170                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3171                                                 I40E_DEFAULT_TCMAP);
3172                 if (ret != I40E_SUCCESS) {
3173                         PMD_DRV_LOG(ERR, "Failed to configure "
3174                                         "TC queue mapping");
3175                         goto fail_msix_alloc;
3176                 }
3177                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3178                 ctxt.info.valid_sections |=
3179                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3180         } else if (type == I40E_VSI_FDIR) {
3181                 memset(&ctxt, 0, sizeof(ctxt));
3182                 vsi->uplink_seid = uplink_vsi->uplink_seid;
3183                 ctxt.pf_num = hw->pf_id;
3184                 ctxt.vf_num = 0;
3185                 ctxt.uplink_seid = vsi->uplink_seid;
3186                 ctxt.connection_type = 0x1;     /* regular data port */
3187                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
3188                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3189                                                 I40E_DEFAULT_TCMAP);
3190                 if (ret != I40E_SUCCESS) {
3191                         PMD_DRV_LOG(ERR, "Failed to configure "
3192                                         "TC queue mapping.");
3193                         goto fail_msix_alloc;
3194                 }
3195                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3196                 ctxt.info.valid_sections |=
3197                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3198         } else {
3199                 PMD_DRV_LOG(ERR, "VSI: other VSI types are not supported yet");
3200                 goto fail_msix_alloc;
3201         }
3202
3203         if (vsi->type != I40E_VSI_MAIN) {
3204                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
3205                 if (ret != I40E_SUCCESS) {
3206                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
3207                                     hw->aq.asq_last_status);
3208                         goto fail_msix_alloc;
3209                 }
3210                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
3211                 vsi->info.valid_sections = 0;
3212                 vsi->seid = ctxt.seid;
3213                 vsi->vsi_id = ctxt.vsi_number;
3214                 vsi->sib_vsi_list.vsi = vsi;
3215                 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
3216                                 &vsi->sib_vsi_list, list);
3217         }
3218
3219         /* MAC/VLAN configuration */
3220         (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
3221         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3222
3223         ret = i40e_vsi_add_mac(vsi, &filter);
3224         if (ret != I40E_SUCCESS) {
3225                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3226                 goto fail_msix_alloc;
3227         }
3228
3229         /* Get VSI BW information */
3230         i40e_vsi_dump_bw_config(vsi);
3231         return vsi;
3232 fail_msix_alloc:
3233         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
3234 fail_queue_alloc:
3235         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
3236 fail_mem:
3237         rte_free(vsi);
3238         return NULL;
3239 }
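
/**
 * Minimal usage sketch (illustrative only, not part of the driver):
 * creating a VMDQ2 VSI under the main VSI, assuming pf->main_vsi was
 * already set up by i40e_pf_setup(). For this VSI type, user_param
 * carries the loopback enable flag; the helper name is made up.
 */
#if 0
static struct i40e_vsi *
example_create_vmdq_vsi(struct i40e_pf *pf, uint16_t enable_loopback)
{
        return i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
                              enable_loopback);
}
#endif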
3240
3241 /* Configure vlan stripping on or off */
3242 int
3243 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
3244 {
3245         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3246         struct i40e_vsi_context ctxt;
3247         uint8_t vlan_flags;
3248         int ret = I40E_SUCCESS;
3249
3250         /* Check if it is already on or off */
3251         if (vsi->info.valid_sections &
3252                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
3253                 if (on) {
3254                         if ((vsi->info.port_vlan_flags &
3255                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
3256                                 return 0; /* already on */
3257                 } else {
3258                         if ((vsi->info.port_vlan_flags &
3259                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
3260                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
3261                                 return 0; /* already off */
3262                 }
3263         }
3264
3265         if (on)
3266                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3267         else
3268                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
3269         vsi->info.valid_sections =
3270                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3271         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
3272         vsi->info.port_vlan_flags |= vlan_flags;
3273         ctxt.seid = vsi->seid;
3274         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3275         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3276         if (ret)
3277                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
3278                             on ? "enable" : "disable");
3279
3280         return ret;
3281 }
3282
3283 static int
3284 i40e_dev_init_vlan(struct rte_eth_dev *dev)
3285 {
3286         struct rte_eth_dev_data *data = dev->data;
3287         int ret;
3288
3289         /* Apply vlan offload setting */
3290         i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
3291
3292         /* Apply double-vlan setting, not implemented yet */
3293
3294         /* Apply pvid setting */
3295         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
3296                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
3297         if (ret)
3298                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
3299
3300         return ret;
3301 }
3302
3303 static int
3304 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
3305 {
3306         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3307
3308         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
3309 }
3310
3311 static int
3312 i40e_update_flow_control(struct i40e_hw *hw)
3313 {
3314 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
3315         struct i40e_link_status link_status;
3316         uint32_t rxfc = 0, txfc = 0, reg;
3317         uint8_t an_info;
3318         int ret;
3319
3320         memset(&link_status, 0, sizeof(link_status));
3321         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
3322         if (ret != I40E_SUCCESS) {
3323                 PMD_DRV_LOG(ERR, "Failed to get link status information");
3324                 goto write_reg; /* Disable flow control */
3325         }
3326
3327         an_info = hw->phy.link_info.an_info;
3328         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
3329                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
3330                 ret = I40E_ERR_NOT_READY;
3331                 goto write_reg; /* Disable flow control */
3332         }
3333         /**
3334          * If link auto-negotiation is enabled, flow control needs to
3335          * be configured according to it.
3336          */
3337         switch (an_info & I40E_LINK_PAUSE_RXTX) {
3338         case I40E_LINK_PAUSE_RXTX:
3339                 rxfc = 1;
3340                 txfc = 1;
3341                 hw->fc.current_mode = I40E_FC_FULL;
3342                 break;
3343         case I40E_AQ_LINK_PAUSE_RX:
3344                 rxfc = 1;
3345                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
3346                 break;
3347         case I40E_AQ_LINK_PAUSE_TX:
3348                 txfc = 1;
3349                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
3350                 break;
3351         default:
3352                 hw->fc.current_mode = I40E_FC_NONE;
3353                 break;
3354         }
3355
3356 write_reg:
3357         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
3358                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
3359         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3360         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
3361         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
3362         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
3363
3364         return ret;
3365 }
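
/**
 * Worked example (illustrative only): if auto negotiation completed with
 * both pause directions advertised, an_info carries both
 * I40E_AQ_LINK_PAUSE_RX and I40E_AQ_LINK_PAUSE_TX, the switch above selects
 * I40E_FC_FULL, and both rxfc and txfc are written as 1. If AN did not
 * complete, the code jumps to write_reg with rxfc = txfc = 0, i.e. flow
 * control disabled.
 */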
3366
3367 /* PF setup */
3368 static int
3369 i40e_pf_setup(struct i40e_pf *pf)
3370 {
3371         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3372         struct i40e_filter_control_settings settings;
3373         struct i40e_vsi *vsi;
3374         int ret;
3375
3376         /* Clear all stats counters */
3377         pf->offset_loaded = FALSE;
3378         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
3379         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
3380
3381         ret = i40e_pf_get_switch_config(pf);
3382         if (ret != I40E_SUCCESS) {
3383                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
3384                 return ret;
3385         }
3386         if (pf->flags & I40E_FLAG_FDIR) {
3387                 /* Allocate queues first so that FDIR can use queue pair 0 */
3388                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
3389                 if (ret != I40E_FDIR_QUEUE_ID) {
3390                         PMD_DRV_LOG(ERR, "queue allocation failed for FDIR:"
3391                                     " ret=%d", ret);
3392                         pf->flags &= ~I40E_FLAG_FDIR;
3393                 }
3394         }
3395         /* Main VSI setup */
3396         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
3397         if (!vsi) {
3398                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
3399                 return I40E_ERR_NOT_READY;
3400         }
3401         pf->main_vsi = vsi;
3402
3403         /* Configure filter control */
3404         memset(&settings, 0, sizeof(settings));
3405         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
3406                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
3407         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
3408                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
3409         else {
3410                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
3411                                                 hw->func_caps.rss_table_size);
3412                 return I40E_ERR_PARAM;
3413         }
3414         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
3415                         "size: %u\n", hw->func_caps.rss_table_size);
3416         pf->hash_lut_size = hw->func_caps.rss_table_size;
3417
3418         /* Enable ethtype and macvlan filters */
3419         settings.enable_ethtype = TRUE;
3420         settings.enable_macvlan = TRUE;
3421         ret = i40e_set_filter_control(hw, &settings);
3422         if (ret)
3423                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
3424                                                                 ret);
3425
3426         /* Update flow control according to the auto negotiation */
3427         i40e_update_flow_control(hw);
3428
3429         return I40E_SUCCESS;
3430 }
3431
3432 int
3433 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3434 {
3435         uint32_t reg;
3436         uint16_t j;
3437
3438         /**
3439          * Set or clear the TX Queue Disable flags,
3440          * as required by hardware.
3441          */
3442         i40e_pre_tx_queue_cfg(hw, q_idx, on);
3443         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
3444
3445         /* Wait until the request is finished */
3446         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3447                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3448                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3449                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3450                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
3451                                                         & 0x1))) {
3452                         break;
3453                 }
3454         }
3455         if (on) {
3456                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3457                         return I40E_SUCCESS; /* already on, skip next steps */
3458
3459                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
3460                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3461         } else {
3462                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3463                         return I40E_SUCCESS; /* already off, skip next steps */
3464                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3465         }
3466         /* Write the register */
3467         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
3468         /* Check the result */
3469         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3470                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3471                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3472                 if (on) {
3473                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3474                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
3475                                 break;
3476                 } else {
3477                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3478                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3479                                 break;
3480                 }
3481         }
3482         /* Check if it is timeout */
3483         if (j >= I40E_CHK_Q_ENA_COUNT) {
3484                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
3485                             (on ? "enable" : "disable"), q_idx);
3486                 return I40E_ERR_TIMEOUT;
3487         }
3488
3489         return I40E_SUCCESS;
3490 }
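
/**
 * Minimal sketch (illustrative only, not part of the driver) of the
 * QENA_REQ/QENA_STAT handshake used above: after a state change is
 * requested, poll until the STAT bit reflects the requested state or the
 * poll budget is exhausted. The helper name is made up.
 */
#if 0
static int
example_wait_tx_queue_state(struct i40e_hw *hw, uint16_t q_idx, bool on)
{
        uint32_t reg;
        uint16_t j;

        for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
                rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
                reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
                if (on == !!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
                        return I40E_SUCCESS;
        }

        return I40E_ERR_TIMEOUT;
}
#endif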
3491
3492 /* Switch on or off the tx queues */
3493 static int
3494 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
3495 {
3496         struct rte_eth_dev_data *dev_data = pf->dev_data;
3497         struct i40e_tx_queue *txq;
3498         struct rte_eth_dev *dev = pf->adapter->eth_dev;
3499         uint16_t i;
3500         int ret;
3501
3502         for (i = 0; i < dev_data->nb_tx_queues; i++) {
3503                 txq = dev_data->tx_queues[i];
3504                 /* Skip the queue if it is not configured, or if it
3505                  * is flagged for deferred (per-queue) start */
3506                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
3507                         continue;
3508                 if (on)
3509                         ret = i40e_dev_tx_queue_start(dev, i);
3510                 else
3511                         ret = i40e_dev_tx_queue_stop(dev, i);
3512                 if (ret != I40E_SUCCESS)
3513                         return ret;
3514         }
3515
3516         return I40E_SUCCESS;
3517 }
3518
3519 int
3520 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3521 {
3522         uint32_t reg;
3523         uint16_t j;
3524
3525         /* Wait until the request is finished */
3526         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3527                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3528                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3529                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3530                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
3531                         break;
3532         }
3533
3534         if (on) {
3535                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3536                         return I40E_SUCCESS; /* Already on, skip next steps */
3537                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3538         } else {
3539                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3540                         return I40E_SUCCESS; /* Already off, skip next steps */
3541                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3542         }
3543
3544         /* Write the register */
3545         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3546         /* Check the result */
3547         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3548                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3549                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3550                 if (on) {
3551                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3552                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
3553                                 break;
3554                 } else {
3555                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3556                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3557                                 break;
3558                 }
3559         }
3560
3561         /* Check if it is timeout */
3562         if (j >= I40E_CHK_Q_ENA_COUNT) {
3563                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
3564                             (on ? "enable" : "disable"), q_idx);
3565                 return I40E_ERR_TIMEOUT;
3566         }
3567
3568         return I40E_SUCCESS;
3569 }
3570 /* Switch on or off the rx queues */
3571 static int
3572 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
3573 {
3574         struct rte_eth_dev_data *dev_data = pf->dev_data;
3575         struct i40e_rx_queue *rxq;
3576         struct rte_eth_dev *dev = pf->adapter->eth_dev;
3577         uint16_t i;
3578         int ret;
3579
3580         for (i = 0; i < dev_data->nb_rx_queues; i++) {
3581                 rxq = dev_data->rx_queues[i];
3582                 /* Skip the queue if it is not configured, or if it
3583                  * is flagged for deferred (per-queue) start */
3584                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
3585                         continue;
3586                 if (on)
3587                         ret = i40e_dev_rx_queue_start(dev, i);
3588                 else
3589                         ret = i40e_dev_rx_queue_stop(dev, i);
3590                 if (ret != I40E_SUCCESS)
3591                         return ret;
3592         }
3593
3594         return I40E_SUCCESS;
3595 }
3596
3597 /* Switch on or off all the rx/tx queues */
3598 int
3599 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
3600 {
3601         int ret;
3602
3603         if (on) {
3604                 /* enable rx queues before enabling tx queues */
3605                 ret = i40e_dev_switch_rx_queues(pf, on);
3606                 if (ret) {
3607                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
3608                         return ret;
3609                 }
3610                 ret = i40e_dev_switch_tx_queues(pf, on);
3611         } else {
3612                 /* Stop tx queues before stopping rx queues */
3613                 ret = i40e_dev_switch_tx_queues(pf, on);
3614                 if (ret) {
3615                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
3616                         return ret;
3617                 }
3618                 ret = i40e_dev_switch_rx_queues(pf, on);
3619         }
3620
3621         return ret;
3622 }
3623
3624 /* Initialize VSI for TX */
3625 static int
3626 i40e_dev_tx_init(struct i40e_pf *pf)
3627 {
3628         struct rte_eth_dev_data *data = pf->dev_data;
3629         uint16_t i;
3630         uint32_t ret = I40E_SUCCESS;
3631         struct i40e_tx_queue *txq;
3632
3633         for (i = 0; i < data->nb_tx_queues; i++) {
3634                 txq = data->tx_queues[i];
3635                 if (!txq || !txq->q_set)
3636                         continue;
3637                 ret = i40e_tx_queue_init(txq);
3638                 if (ret != I40E_SUCCESS)
3639                         break;
3640         }
3641
3642         return ret;
3643 }
3644
3645 /* Initialize VSI for RX */
3646 static int
3647 i40e_dev_rx_init(struct i40e_pf *pf)
3648 {
3649         struct rte_eth_dev_data *data = pf->dev_data;
3650         int ret = I40E_SUCCESS;
3651         uint16_t i;
3652         struct i40e_rx_queue *rxq;
3653
3654         i40e_pf_config_mq_rx(pf);
3655         for (i = 0; i < data->nb_rx_queues; i++) {
3656                 rxq = data->rx_queues[i];
3657                 if (!rxq || !rxq->q_set)
3658                         continue;
3659
3660                 ret = i40e_rx_queue_init(rxq);
3661                 if (ret != I40E_SUCCESS) {
3662                         PMD_DRV_LOG(ERR, "Failed to do RX queue "
3663                                     "initialization");
3664                         break;
3665                 }
3666         }
3667
3668         return ret;
3669 }
3670
3671 static int
3672 i40e_dev_rxtx_init(struct i40e_pf *pf)
3673 {
3674         int err;
3675
3676         err = i40e_dev_tx_init(pf);
3677         if (err) {
3678                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
3679                 return err;
3680         }
3681         err = i40e_dev_rx_init(pf);
3682         if (err) {
3683                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
3684                 return err;
3685         }
3686
3687         return err;
3688 }
3689
3690 static int
3691 i40e_vmdq_setup(struct rte_eth_dev *dev)
3692 {
3693         struct rte_eth_conf *conf = &dev->data->dev_conf;
3694         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3695         int i, err, conf_vsis, j, loop;
3696         struct i40e_vsi *vsi;
3697         struct i40e_vmdq_info *vmdq_info;
3698         struct rte_eth_vmdq_rx_conf *vmdq_conf;
3699         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3700
3701         /*
3702          * Disable interrupts to avoid messages from VFs. Furthermore, this
3703          * avoids race conditions during VSI creation/destruction.
3704          */
3705         i40e_pf_disable_irq0(hw);
3706
3707         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
3708                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
3709                 return -ENOTSUP;
3710         }
3711
3712         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
3713         if (conf_vsis > pf->max_nb_vmdq_vsi) {
3714                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
3715                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
3716                         pf->max_nb_vmdq_vsi);
3717                 return -ENOTSUP;
3718         }
3719
3720         if (pf->vmdq != NULL) {
3721                 PMD_INIT_LOG(INFO, "VMDQ already configured");
3722                 return 0;
3723         }
3724
3725         pf->vmdq = rte_zmalloc("vmdq_info_struct",
3726                                 sizeof(*vmdq_info) * conf_vsis, 0);
3727
3728         if (pf->vmdq == NULL) {
3729                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
3730                 return -ENOMEM;
3731         }
3732
3733         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
3734
3735         /* Create VMDQ VSI */
3736         for (i = 0; i < conf_vsis; i++) {
3737                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
3738                                 vmdq_conf->enable_loop_back);
3739                 if (vsi == NULL) {
3740                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
3741                         err = -1;
3742                         goto err_vsi_setup;
3743                 }
3744                 vmdq_info = &pf->vmdq[i];
3745                 vmdq_info->pf = pf;
3746                 vmdq_info->vsi = vsi;
3747         }
3748         pf->nb_cfg_vmdq_vsi = conf_vsis;
3749
3750         /* Configure VLAN */
3751         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
3752         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
3753                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
3754                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
3755                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
3756                                         vmdq_conf->pool_map[i].vlan_id, j);
3757
3758                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
3759                                                 vmdq_conf->pool_map[i].vlan_id);
3760                                 if (err) {
3761                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
3762                                         err = -1;
3763                                         goto err_vsi_setup;
3764                                 }
3765                         }
3766                 }
3767         }
3768
3769         i40e_pf_enable_irq0(hw);
3770
3771         return 0;
3772
3773 err_vsi_setup:
3774         for (i = 0; i < conf_vsis; i++)
3775                 if (pf->vmdq[i].vsi == NULL)
3776                         break;
3777                 else
3778                         i40e_vsi_release(pf->vmdq[i].vsi);
3779
3780         rte_free(pf->vmdq);
3781         pf->vmdq = NULL;
3782         i40e_pf_enable_irq0(hw);
3783         return err;
3784 }
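
/**
 * Illustrative configuration sketch (not part of the driver): how an
 * application might fill rte_eth_vmdq_rx_conf before rte_eth_dev_configure()
 * so that the setup above creates the VMDQ VSIs. The pool count and VLAN id
 * are hypothetical; field names follow struct rte_eth_vmdq_rx_conf.
 */
#if 0
static void
example_fill_vmdq_conf(struct rte_eth_conf *conf)
{
        struct rte_eth_vmdq_rx_conf *vmdq = &conf->rx_adv_conf.vmdq_rx_conf;

        vmdq->nb_queue_pools = ETH_16_POOLS;
        vmdq->enable_loop_back = 1; /* passed as user_param to i40e_vsi_setup */
        vmdq->nb_pool_maps = 1;
        vmdq->pool_map[0].vlan_id = 100; /* hypothetical VLAN */
        vmdq->pool_map[0].pools = 0x3;   /* map VLAN 100 to pools 0 and 1 */
}
#endif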
3785
3786 static void
3787 i40e_stat_update_32(struct i40e_hw *hw,
3788                    uint32_t reg,
3789                    bool offset_loaded,
3790                    uint64_t *offset,
3791                    uint64_t *stat)
3792 {
3793         uint64_t new_data;
3794
3795         new_data = (uint64_t)I40E_READ_REG(hw, reg);
3796         if (!offset_loaded)
3797                 *offset = new_data;
3798
3799         if (new_data >= *offset)
3800                 *stat = (uint64_t)(new_data - *offset);
3801         else
3802                 *stat = (uint64_t)((new_data +
3803                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
3804 }
3805
3806 static void
3807 i40e_stat_update_48(struct i40e_hw *hw,
3808                    uint32_t hireg,
3809                    uint32_t loreg,
3810                    bool offset_loaded,
3811                    uint64_t *offset,
3812                    uint64_t *stat)
3813 {
3814         uint64_t new_data;
3815
3816         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3817         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3818                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
3819
3820         if (!offset_loaded)
3821                 *offset = new_data;
3822
3823         if (new_data >= *offset)
3824                 *stat = new_data - *offset;
3825         else
3826                 *stat = (uint64_t)((new_data +
3827                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
3828
3829         *stat &= I40E_48_BIT_MASK;
3830 }
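
/**
 * Worked example (illustrative only): if the previous reading was
 * *offset = 0xFFFFFFFFFFF0 and the 48-bit counter now reads
 * new_data = 0x10, the counter has wrapped, so the delta is computed as
 * (0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0 = 0x20, then masked to 48 bits.
 */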
3831
3832 /* Disable IRQ0 */
3833 void
3834 i40e_pf_disable_irq0(struct i40e_hw *hw)
3835 {
3836         /* Disable all interrupt types */
3837         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3838         I40E_WRITE_FLUSH(hw);
3839 }
3840
3841 /* Enable IRQ0 */
3842 void
3843 i40e_pf_enable_irq0(struct i40e_hw *hw)
3844 {
3845         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3846                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3847                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3848                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3849         I40E_WRITE_FLUSH(hw);
3850 }
3851
3852 static void
3853 i40e_pf_config_irq0(struct i40e_hw *hw)
3854 {
3855         /* read pending request and disable first */
3856         i40e_pf_disable_irq0(hw);
3857         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
3858         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3859                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3860
3861         /* Link no queues with irq0 */
3862         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3863                 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3864 }
3865
3866 static void
3867 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3868 {
3869         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3870         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3871         int i;
3872         uint16_t abs_vf_id;
3873         uint32_t index, offset, val;
3874
3875         if (!pf->vfs)
3876                 return;
3877         /**
3878          * Try to find which VF triggered a reset; use the absolute VF id
3879          * to access it, since the register is a global register.
3880          */
3881         for (i = 0; i < pf->vf_num; i++) {
3882                 abs_vf_id = hw->func_caps.vf_base_id + i;
3883                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3884                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3885                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3886                 /* VFR event occurred */
3887                 if (val & (0x1 << offset)) {
3888                         int ret;
3889
3890                         /* Clear the event first */
3891                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3892                                                         (0x1 << offset));
3893                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
3894                         /**
3895                          * Only notify that a VF reset event occurred;
3896                          * don't trigger another SW reset.
3897                          */
3898                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3899                         if (ret != I40E_SUCCESS)
3900                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
3901                 }
3902         }
3903 }
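
/**
 * Worked example (illustrative only): with vf_base_id = 64 and i = 3,
 * abs_vf_id = 67, so index = 67 / 32 = 2 and offset = 67 % 32 = 3; the VFR
 * event for this VF is therefore bit 3 of I40E_GLGEN_VFLRSTAT(2).
 */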
3904
3905 static void
3906 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3907 {
3908         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3909         struct i40e_arq_event_info info;
3910         uint16_t pending, opcode;
3911         int ret;
3912
3913         info.buf_len = I40E_AQ_BUF_SZ;
3914         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
3915         if (!info.msg_buf) {
3916                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
3917                 return;
3918         }
3919
3920         pending = 1;
3921         while (pending) {
3922                 ret = i40e_clean_arq_element(hw, &info, &pending);
3923
3924                 if (ret != I40E_SUCCESS) {
3925                         PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3926                                     "aq_err: %u", hw->aq.asq_last_status);
3927                         break;
3928                 }
3929                 opcode = rte_le_to_cpu_16(info.desc.opcode);
3930
3931                 switch (opcode) {
3932                 case i40e_aqc_opc_send_msg_to_pf:
3933                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
3934                         i40e_pf_host_handle_vf_msg(dev,
3935                                         rte_le_to_cpu_16(info.desc.retval),
3936                                         rte_le_to_cpu_32(info.desc.cookie_high),
3937                                         rte_le_to_cpu_32(info.desc.cookie_low),
3938                                         info.msg_buf,
3939                                         info.msg_len);
3940                         break;
3941                 default:
3942                         PMD_DRV_LOG(ERR, "Request %u is not supported yet",
3943                                     opcode);
3944                         break;
3945                 }
3946         }
3947         rte_free(info.msg_buf);
3948 }
3949
3950 /*
3951  * The interrupt handler is registered as an alarm callback for handling the
3952  * LSC interrupt after a definite period of time, in order to wait for the
3953  * NIC to reach a stable state. Currently i40e waits 1 sec for the link up
3954  * interrupt; no wait is needed for the link down interrupt.
3955  */
3956 static void
3957 i40e_dev_interrupt_delayed_handler(void *param)
3958 {
3959         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3960         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3961         uint32_t icr0;
3962
3963         /* read interrupt causes again */
3964         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3965
3966 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
3967         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
3968                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
3969         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3970                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
3971         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
3972                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
3973         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3974                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
3975         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
3976                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
3977                                                                 "state");
3978         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
3979                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
3980         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
3981                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
3982 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
3983
3984         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3985                 PMD_DRV_LOG(INFO, "INT: VF reset detected");
3986                 i40e_dev_handle_vfr_event(dev);
3987         }
3988         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3989                 PMD_DRV_LOG(INFO, "INT: ADMINQ event");
3990                 i40e_dev_handle_aq_msg(dev);
3991         }
3992
3993         /* handle the link up interrupt in an alarm callback */
3994         i40e_dev_link_update(dev, 0);
3995         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
3996
3997         i40e_pf_enable_irq0(hw);
3998         rte_intr_enable(&(dev->pci_dev->intr_handle));
3999 }
4000
4001 /**
4002  * Interrupt handler triggered by the NIC for handling
4003  * specific interrupts.
4004  *
4005  * @param handle
4006  *  Pointer to interrupt handle.
4007  * @param param
4008  *  The address of the parameter (struct rte_eth_dev *) registered before.
4009  *
4010  * @return
4011  *  void
4012  */
4013 static void
4014 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
4015                            void *param)
4016 {
4017         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4018         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4019         uint32_t icr0;
4020
4021         /* Disable interrupt */
4022         i40e_pf_disable_irq0(hw);
4023
4024         /* read out interrupt causes */
4025         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
4026
4027         /* No interrupt event indicated */
4028         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
4029                 PMD_DRV_LOG(INFO, "No interrupt event");
4030                 goto done;
4031         }
4032 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
4033         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
4034                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
4035         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
4036                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
4037         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
4038                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
4039         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
4040                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
4041         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
4042                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
4043         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
4044                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
4045         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
4046                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
4047 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
4048
4049         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4050                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
4051                 i40e_dev_handle_vfr_event(dev);
4052         }
4053         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4054                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
4055                 i40e_dev_handle_aq_msg(dev);
4056         }
4057
4058         /* Link Status Change interrupt */
4059         if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
4060 #define I40E_US_PER_SECOND 1000000
4061                 struct rte_eth_link link;
4062
4063                 PMD_DRV_LOG(INFO, "ICR0: link status changed");
4064                 memset(&link, 0, sizeof(link));
4065                 rte_i40e_dev_atomic_read_link_status(dev, &link);
4066                 i40e_dev_link_update(dev, 0);
4067
4068                 /*
4069                  * For a link up interrupt, wait 1 second to let the hardware
4070                  * reach a stable state; otherwise several consecutive
4071                  * interrupts can be observed ('link' holds the pre-update
4072                  * status). For a link down interrupt, no need to wait.
4073                  */
4074                 if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
4075                         i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
4076                         return;
4077                 else
4078                         _rte_eth_dev_callback_process(dev,
4079                                 RTE_ETH_EVENT_INTR_LSC);
4080         }
4081
4082 done:
4083         /* Enable interrupt */
4084         i40e_pf_enable_irq0(hw);
4085         rte_intr_enable(&(dev->pci_dev->intr_handle));
4086 }
4087
4088 static int
4089 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
4090                          struct i40e_macvlan_filter *filter,
4091                          int total)
4092 {
4093         int ele_num, ele_buff_size;
4094         int num, actual_num, i;
4095         uint16_t flags;
4096         int ret = I40E_SUCCESS;
4097         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4098         struct i40e_aqc_add_macvlan_element_data *req_list;
4099
4100         if (filter == NULL || total == 0)
4101                 return I40E_ERR_PARAM;
4102         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
4103         ele_buff_size = hw->aq.asq_buf_size;
4104
4105         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
4106         if (req_list == NULL) {
4107                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
4108                 return I40E_ERR_NO_MEMORY;
4109         }
4110
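             /*
              * The AQ buffer holds at most ele_num elements, so issue the
              * add requests in batches until all 'total' filters are sent.
              */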
4111         num = 0;
4112         do {
4113                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
4114                 memset(req_list, 0, ele_buff_size);
4115
4116                 for (i = 0; i < actual_num; i++) {
4117                         (void)rte_memcpy(req_list[i].mac_addr,
4118                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
4119                         req_list[i].vlan_tag =
4120                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
4121
4122                         switch (filter[num + i].filter_type) {
4123                         case RTE_MAC_PERFECT_MATCH:
4124                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
4125                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
4126                                 break;
4127                         case RTE_MACVLAN_PERFECT_MATCH:
4128                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
4129                                 break;
4130                         case RTE_MAC_HASH_MATCH:
4131                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
4132                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
4133                                 break;
4134                         case RTE_MACVLAN_HASH_MATCH:
4135                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
4136                                 break;
4137                         default:
4138                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
4139                                 ret = I40E_ERR_PARAM;
4140                                 goto DONE;
4141                         }
4142
4143                         req_list[i].queue_number = 0;
4144
4145                         req_list[i].flags = rte_cpu_to_le_16(flags);
4146                 }
4147
4148                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
4149                                                 actual_num, NULL);
4150                 if (ret != I40E_SUCCESS) {
4151                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
4152                         goto DONE;
4153                 }
4154                 num += actual_num;
4155         } while (num < total);
4156
4157 DONE:
4158         rte_free(req_list);
4159         return ret;
4160 }
4161
4162 static int
4163 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
4164                             struct i40e_macvlan_filter *filter,
4165                             int total)
4166 {
4167         int ele_num, ele_buff_size;
4168         int num, actual_num, i;
4169         uint16_t flags;
4170         int ret = I40E_SUCCESS;
4171         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4172         struct i40e_aqc_remove_macvlan_element_data *req_list;
4173
4174         if (filter == NULL || total == 0)
4175                 return I40E_ERR_PARAM;
4176
4177         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
4178         ele_buff_size = hw->aq.asq_buf_size;
4179
4180         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
4181         if (req_list == NULL) {
4182                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
4183                 return I40E_ERR_NO_MEMORY;
4184         }
4185
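             /* Batch the remove requests the same way as in the add path. */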
4186         num = 0;
4187         do {
4188                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
4189                 memset(req_list, 0, ele_buff_size);
4190
4191                 for (i = 0; i < actual_num; i++) {
4192                         (void)rte_memcpy(req_list[i].mac_addr,
4193                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
4194                         req_list[i].vlan_tag =
4195                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
4196
4197                         switch (filter[num + i].filter_type) {
4198                         case RTE_MAC_PERFECT_MATCH:
4199                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4200                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4201                                 break;
4202                         case RTE_MACVLAN_PERFECT_MATCH:
4203                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
4204                                 break;
4205                         case RTE_MAC_HASH_MATCH:
4206                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
4207                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4208                                 break;
4209                         case RTE_MACVLAN_HASH_MATCH:
4210                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
4211                                 break;
4212                         default:
4213                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
4214                                 ret = I40E_ERR_PARAM;
4215                                 goto DONE;
4216                         }
4217                         req_list[i].flags = rte_cpu_to_le_16(flags);
4218                 }
4219
4220                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
4221                                                 actual_num, NULL);
4222                 if (ret != I40E_SUCCESS) {
4223                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
4224                         goto DONE;
4225                 }
4226                 num += actual_num;
4227         } while (num < total);
4228
4229 DONE:
4230         rte_free(req_list);
4231         return ret;
4232 }
4233
4234 /* Find out specific MAC filter */
4235 static struct i40e_mac_filter *
4236 i40e_find_mac_filter(struct i40e_vsi *vsi,
4237                          struct ether_addr *macaddr)
4238 {
4239         struct i40e_mac_filter *f;
4240
4241         TAILQ_FOREACH(f, &vsi->mac_list, next) {
4242                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
4243                         return f;
4244         }
4245
4246         return NULL;
4247 }
4248
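     /*
      * The VFTA is a bitmap with one bit per VLAN id: I40E_VFTA_IDX()
      * selects the 32-bit word and I40E_VFTA_BIT() the bit within it
      * (cf. the reconstruction in i40e_find_all_vlan_for_mac()), so e.g.
      * VLAN 100 lands in word 3 (100 / 32) at bit 4 (100 % 32).
      */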
4249 static bool
4250 i40e_find_vlan_filter(struct i40e_vsi *vsi,
4251                          uint16_t vlan_id)
4252 {
4253         uint32_t vid_idx, vid_bit;
4254
4255         if (vlan_id > ETH_VLAN_ID_MAX)
4256                 return 0;
4257
4258         vid_idx = I40E_VFTA_IDX(vlan_id);
4259         vid_bit = I40E_VFTA_BIT(vlan_id);
4260
4261         if (vsi->vfta[vid_idx] & vid_bit)
4262                 return 1;
4263         else
4264                 return 0;
4265 }
4266
4267 static void
4268 i40e_set_vlan_filter(struct i40e_vsi *vsi,
4269                          uint16_t vlan_id, bool on)
4270 {
4271         uint32_t vid_idx, vid_bit;
4272
4273         if (vlan_id > ETH_VLAN_ID_MAX)
4274                 return;
4275
4276         vid_idx = I40E_VFTA_IDX(vlan_id);
4277         vid_bit = I40E_VFTA_BIT(vlan_id);
4278
4279         if (on)
4280                 vsi->vfta[vid_idx] |= vid_bit;
4281         else
4282                 vsi->vfta[vid_idx] &= ~vid_bit;
4283 }
4284
4285 /**
4286  * Find all vlan options for specific mac addr,
4287  * return with actual vlan found.
4288  */
4289 static inline int
4290 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
4291                            struct i40e_macvlan_filter *mv_f,
4292                            int num, struct ether_addr *addr)
4293 {
4294         int i;
4295         uint32_t j, k;
4296
4297         /**
4298          * Scan the VFTA directly, rather than calling i40e_find_vlan_filter()
4299          * per VLAN id, to decrease loop time, although the code looks complex.
4300          */
4301         if (num < vsi->vlan_num)
4302                 return I40E_ERR_PARAM;
4303
4304         i = 0;
4305         for (j = 0; j < I40E_VFTA_SIZE; j++) {
4306                 if (vsi->vfta[j]) {
4307                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
4308                                 if (vsi->vfta[j] & (1 << k)) {
4309                                         if (i > num - 1) {
4310                                                 PMD_DRV_LOG(ERR, "vlan number "
4311                                                             "doesn't match");
4312                                                 return I40E_ERR_PARAM;
4313                                         }
4314                                         (void)rte_memcpy(&mv_f[i].macaddr,
4315                                                         addr, ETH_ADDR_LEN);
4316                                         mv_f[i].vlan_id =
4317                                                 j * I40E_UINT32_BIT_SIZE + k;
4318                                         i++;
4319                                 }
4320                         }
4321                 }
4322         }
4323         return I40E_SUCCESS;
4324 }
4325
4326 static inline int
4327 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
4328                            struct i40e_macvlan_filter *mv_f,
4329                            int num,
4330                            uint16_t vlan)
4331 {
4332         int i = 0;
4333         struct i40e_mac_filter *f;
4334
4335         if (num < vsi->mac_num)
4336                 return I40E_ERR_PARAM;
4337
4338         TAILQ_FOREACH(f, &vsi->mac_list, next) {
4339                 if (i > num - 1) {
4340                         PMD_DRV_LOG(ERR, "buffer number doesn't match");
4341                         return I40E_ERR_PARAM;
4342                 }
4343                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
4344                                 ETH_ADDR_LEN);
4345                 mv_f[i].vlan_id = vlan;
4346                 mv_f[i].filter_type = f->mac_info.filter_type;
4347                 i++;
4348         }
4349
4350         return I40E_SUCCESS;
4351 }
4352
4353 static int
4354 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
4355 {
4356         int i, num;
4357         struct i40e_mac_filter *f;
4358         struct i40e_macvlan_filter *mv_f;
4359         int ret = I40E_SUCCESS;
4360
4361         if (vsi == NULL || vsi->mac_num == 0)
4362                 return I40E_ERR_PARAM;
4363
4364         /* Case that no vlan is set */
4365         if (vsi->vlan_num == 0)
4366                 num = vsi->mac_num;
4367         else
4368                 num = vsi->mac_num * vsi->vlan_num;
4369
4370         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
4371         if (mv_f == NULL) {
4372                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4373                 return I40E_ERR_NO_MEMORY;
4374         }
4375
4376         i = 0;
4377         if (vsi->vlan_num == 0) {
4378                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4379                         (void)rte_memcpy(&mv_f[i].macaddr,
4380                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
4381                         mv_f[i].vlan_id = 0;
4382                         i++;
4383                 }
4384         } else {
4385                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4386                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
4387                                         vsi->vlan_num, &f->mac_info.mac_addr);
4388                         if (ret != I40E_SUCCESS)
4389                                 goto DONE;
4390                         i += vsi->vlan_num;
4391                 }
4392         }
4393
4394         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
4395 DONE:
4396         rte_free(mv_f);
4397
4398         return ret;
4399 }
4400
4401 int
4402 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
4403 {
4404         struct i40e_macvlan_filter *mv_f;
4405         int mac_num;
4406         int ret = I40E_SUCCESS;
4407
4408         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
4409                 return I40E_ERR_PARAM;
4410
4411         /* If it's already set, just return */
4412         if (i40e_find_vlan_filter(vsi, vlan))
4413                 return I40E_SUCCESS;
4414
4415         mac_num = vsi->mac_num;
4416
4417         if (mac_num == 0) {
4418                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
4419                 return I40E_ERR_PARAM;
4420         }
4421
4422         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
4423
4424         if (mv_f == NULL) {
4425                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4426                 return I40E_ERR_NO_MEMORY;
4427         }
4428
4429         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
4430
4431         if (ret != I40E_SUCCESS)
4432                 goto DONE;
4433
4434         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
4435
4436         if (ret != I40E_SUCCESS)
4437                 goto DONE;
4438
4439         i40e_set_vlan_filter(vsi, vlan, 1);
4440
4441         vsi->vlan_num++;
4442         ret = I40E_SUCCESS;
4443 DONE:
4444         rte_free(mv_f);
4445         return ret;
4446 }
4447
4448 int
4449 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
4450 {
4451         struct i40e_macvlan_filter *mv_f;
4452         int mac_num;
4453         int ret = I40E_SUCCESS;
4454
4455         /**
4456          * Vlan 0 is the generic filter for untagged packets
4457          * and can't be removed.
4458          */
4459         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
4460                 return I40E_ERR_PARAM;
4461
4462         /* If can't find it, just return */
4463         if (!i40e_find_vlan_filter(vsi, vlan))
4464                 return I40E_ERR_PARAM;
4465
4466         mac_num = vsi->mac_num;
4467
4468         if (mac_num == 0) {
4469                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
4470                 return I40E_ERR_PARAM;
4471         }
4472
4473         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
4474
4475         if (mv_f == NULL) {
4476                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4477                 return I40E_ERR_NO_MEMORY;
4478         }
4479
4480         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
4481
4482         if (ret != I40E_SUCCESS)
4483                 goto DONE;
4484
4485         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
4486
4487         if (ret != I40E_SUCCESS)
4488                 goto DONE;
4489
4490         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
4491         if (vsi->vlan_num == 1) {
4492                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
4493                 if (ret != I40E_SUCCESS)
4494                         goto DONE;
4495
4496                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
4497                 if (ret != I40E_SUCCESS)
4498                         goto DONE;
4499         }
4500
4501         i40e_set_vlan_filter(vsi, vlan, 0);
4502
4503         vsi->vlan_num--;
4504         ret = I40E_SUCCESS;
4505 DONE:
4506         rte_free(mv_f);
4507         return ret;
4508 }
4509
4510 int
4511 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
4512 {
4513         struct i40e_mac_filter *f;
4514         struct i40e_macvlan_filter *mv_f;
4515         int i, vlan_num = 0;
4516         int ret = I40E_SUCCESS;
4517
4518         /* If the MAC filter has already been configured, just return */
4519         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
4520         if (f != NULL)
4521                 return I40E_SUCCESS;
4522         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
4523                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
4524
4525                 /**
4526                  * If vlan_num is 0, this is the first MAC being added;
4527                  * set the mask for vlan_id 0.
4528                  */
4529                 if (vsi->vlan_num == 0) {
4530                         i40e_set_vlan_filter(vsi, 0, 1);
4531                         vsi->vlan_num = 1;
4532                 }
4533                 vlan_num = vsi->vlan_num;
4534         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
4535                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
4536                 vlan_num = 1;
4537
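             /* One request element is needed per (MAC, VLAN) pair to program. */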
4538         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4539         if (mv_f == NULL) {
4540                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4541                 return I40E_ERR_NO_MEMORY;
4542         }
4543
4544         for (i = 0; i < vlan_num; i++) {
4545                 mv_f[i].filter_type = mac_filter->filter_type;
4546                 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
4547                                 ETH_ADDR_LEN);
4548         }
4549
4550         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4551                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
4552                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
4553                                         &mac_filter->mac_addr);
4554                 if (ret != I40E_SUCCESS)
4555                         goto DONE;
4556         }
4557
4558         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
4559         if (ret != I40E_SUCCESS)
4560                 goto DONE;
4561
4562         /* Add the mac addr into mac list */
4563         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4564         if (f == NULL) {
4565                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4566                 ret = I40E_ERR_NO_MEMORY;
4567                 goto DONE;
4568         }
4569         (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
4570                         ETH_ADDR_LEN);
4571         f->mac_info.filter_type = mac_filter->filter_type;
4572         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4573         vsi->mac_num++;
4574
4575         ret = I40E_SUCCESS;
4576 DONE:
4577         rte_free(mv_f);
4578
4579         return ret;
4580 }
4581
4582 int
4583 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
4584 {
4585         struct i40e_mac_filter *f;
4586         struct i40e_macvlan_filter *mv_f;
4587         int i, vlan_num;
4588         enum rte_mac_filter_type filter_type;
4589         int ret = I40E_SUCCESS;
4590
4591         /* Can't find it, return an error */
4592         f = i40e_find_mac_filter(vsi, addr);
4593         if (f == NULL)
4594                 return I40E_ERR_PARAM;
4595
4596         vlan_num = vsi->vlan_num;
4597         filter_type = f->mac_info.filter_type;
4598         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4599                 filter_type == RTE_MACVLAN_HASH_MATCH) {
4600                 if (vlan_num == 0) {
4601                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
4602                         return I40E_ERR_PARAM;
4603                 }
4604         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
4605                         filter_type == RTE_MAC_HASH_MATCH)
4606                 vlan_num = 1;
4607
4608         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4609         if (mv_f == NULL) {
4610                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4611                 return I40E_ERR_NO_MEMORY;
4612         }
4613
4614         for (i = 0; i < vlan_num; i++) {
4615                 mv_f[i].filter_type = filter_type;
4616                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
4617                                 ETH_ADDR_LEN);
4618         }
4619         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4620                         filter_type == RTE_MACVLAN_HASH_MATCH) {
4621                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
4622                 if (ret != I40E_SUCCESS)
4623                         goto DONE;
4624         }
4625
4626         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
4627         if (ret != I40E_SUCCESS)
4628                 goto DONE;
4629
4630         /* Remove the mac addr from the mac list */
4631         TAILQ_REMOVE(&vsi->mac_list, f, next);
4632         rte_free(f);
4633         vsi->mac_num--;
4634
4635         ret = I40E_SUCCESS;
4636 DONE:
4637         rte_free(mv_f);
4638         return ret;
4639 }
4640
4641 /* Configure hash enable flags for RSS */
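     /*
      * Each ETH_RSS_* flag maps to one bit of the 64-bit HENA mask, indexed
      * by the hardware packet classifier type (PCTYPE); callers split the
      * result across the PFQF_HENA(0)/(1) register pair.
      */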
4642 uint64_t
4643 i40e_config_hena(uint64_t flags)
4644 {
4645         uint64_t hena = 0;
4646
4647         if (!flags)
4648                 return hena;
4649
4650         if (flags & ETH_RSS_FRAG_IPV4)
4651                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
4652         if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
4653                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
4654         if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
4655                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
4656         if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
4657                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
4658         if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
4659                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
4660         if (flags & ETH_RSS_FRAG_IPV6)
4661                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
4662         if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
4663                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
4664         if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
4665                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
4666         if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
4667                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
4668         if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
4669                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
4670         if (flags & ETH_RSS_L2_PAYLOAD)
4671                 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
4672
4673         return hena;
4674 }
4675
4676 /* Parse the hash enable flags */
4677 uint64_t
4678 i40e_parse_hena(uint64_t flags)
4679 {
4680         uint64_t rss_hf = 0;
4681
4682         if (!flags)
4683                 return rss_hf;
4684         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
4685                 rss_hf |= ETH_RSS_FRAG_IPV4;
4686         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
4687                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
4688         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
4689                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
4690         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
4691                 rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
4692         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
4693                 rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
4694         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
4695                 rss_hf |= ETH_RSS_FRAG_IPV6;
4696         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
4697                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
4698         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
4699                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
4700         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
4701                 rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
4702         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
4703                 rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
4704         if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
4705                 rss_hf |= ETH_RSS_L2_PAYLOAD;
4706
4707         return rss_hf;
4708 }
4709
4710 /* Disable RSS */
4711 static void
4712 i40e_pf_disable_rss(struct i40e_pf *pf)
4713 {
4714         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4715         uint64_t hena;
4716
4717         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4718         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4719         hena &= ~I40E_RSS_HENA_ALL;
4720         I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4721         I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4722         I40E_WRITE_FLUSH(hw);
4723 }
4724
4725 static int
4726 i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
4727 {
4728         uint32_t *hash_key;
4729         uint8_t hash_key_len;
4730         uint64_t rss_hf;
4731         uint16_t i;
4732         uint64_t hena;
4733
4734         hash_key = (uint32_t *)(rss_conf->rss_key);
4735         hash_key_len = rss_conf->rss_key_len;
4736         if (hash_key != NULL && hash_key_len >=
4737                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
4738                 /* Fill in RSS hash key */
4739                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4740                         I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
4741         }
4742
4743         rss_hf = rss_conf->rss_hf;
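             /*
              * Merge the requested hash types into HENA: only the bits covered
              * by I40E_RSS_HENA_ALL are cleared, anything outside is preserved.
              */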
4744         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4745         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4746         hena &= ~I40E_RSS_HENA_ALL;
4747         hena |= i40e_config_hena(rss_hf);
4748         I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4749         I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4750         I40E_WRITE_FLUSH(hw);
4751
4752         return 0;
4753 }
4754
4755 static int
4756 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
4757                          struct rte_eth_rss_conf *rss_conf)
4758 {
4759         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4760         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
4761         uint64_t hena;
4762
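             /*
              * This API only updates the hash types while RSS keeps its
              * current state; transitions between enabled and disabled are
              * rejected below, as those are driven by the Rx mq_mode
              * configuration instead.
              */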
4763         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4764         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4765         if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
4766                 if (rss_hf != 0) /* Enable RSS */
4767                         return -EINVAL;
4768                 return 0; /* Nothing to do */
4769         }
4770         /* RSS enabled */
4771         if (rss_hf == 0) /* Disable RSS */
4772                 return -EINVAL;
4773
4774         return i40e_hw_rss_hash_set(hw, rss_conf);
4775 }
4776
4777 static int
4778 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
4779                            struct rte_eth_rss_conf *rss_conf)
4780 {
4781         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4782         uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
4783         uint64_t hena;
4784         uint16_t i;
4785
4786         if (hash_key != NULL) {
4787                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4788                         hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
4789                 rss_conf->rss_key_len = i * sizeof(uint32_t);
4790         }
4791         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4792         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4793         rss_conf->rss_hf = i40e_parse_hena(hena);
4794
4795         return 0;
4796 }
4797
4798 static int
4799 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
4800 {
4801         switch (filter_type) {
4802         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
4803                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
4804                 break;
4805         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
4806                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
4807                 break;
4808         case RTE_TUNNEL_FILTER_IMAC_TENID:
4809                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
4810                 break;
4811         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
4812                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
4813                 break;
4814         case ETH_TUNNEL_FILTER_IMAC:
4815                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
4816                 break;
4817         default:
4818                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
4819                 return -EINVAL;
4820         }
4821
4822         return 0;
4823 }
4824
4825 static int
4826 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
4827                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
4828                         uint8_t add)
4829 {
4830         uint16_t ip_type;
4831         uint8_t tun_type = 0;
4832         int val, ret = 0;
4833         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4834         struct i40e_vsi *vsi = pf->main_vsi;
4835         struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
4836         struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
4837
4838         cld_filter = rte_zmalloc("tunnel_filter",
4839                 sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
4840                 0);
4841
4842         if (NULL == cld_filter) {
4843                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
4844                 return -EINVAL;
4845         }
4846         pfilter = cld_filter;
4847
4848         (void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
4849                         sizeof(struct ether_addr));
4850         (void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
4851                         sizeof(struct ether_addr));
4852
4853         pfilter->inner_vlan = tunnel_filter->inner_vlan;
4854         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
4855                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
4856                 (void)rte_memcpy(&pfilter->ipaddr.v4.data,
4857                                 &tunnel_filter->ip_addr,
4858                                 sizeof(pfilter->ipaddr.v4.data));
4859         } else {
4860                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
4861                 (void)rte_memcpy(&pfilter->ipaddr.v6.data,
4862                                 &tunnel_filter->ip_addr,
4863                                 sizeof(pfilter->ipaddr.v6.data));
4864         }
4865
4866         /* check the tunnel type */
4867         switch (tunnel_filter->tunnel_type) {
4868         case RTE_TUNNEL_TYPE_VXLAN:
4869                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
4870                 break;
4871         case RTE_TUNNEL_TYPE_NVGRE:
4872                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
4873                 break;
4874         default:
4875                 /* Other tunnel types are not supported. */
4876                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
4877                 rte_free(cld_filter);
4878                 return -EINVAL;
4879         }
4880
4881         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
4882                                                 &pfilter->flags);
4883         if (val < 0) {
4884                 rte_free(cld_filter);
4885                 return -EINVAL;
4886         }
4887
4888         pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
4889                 (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
4890         pfilter->tenant_id = tunnel_filter->tenant_id;
4891         pfilter->queue_number = tunnel_filter->queue_id;
4892
4893         if (add)
4894                 ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
4895         else
4896                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
4897                                                 cld_filter, 1);
4898
4899         rte_free(cld_filter);
4900         return ret;
4901 }
4902
4903 static int
4904 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
4905 {
4906         uint8_t i;
4907
4908         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
4909                 if (pf->vxlan_ports[i] == port)
4910                         return i;
4911         }
4912
4913         return -1;
4914 }
4915
4916 static int
4917 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
4918 {
4919         int idx, ret;
4920         uint8_t filter_idx;
4921         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4922
4923         idx = i40e_get_vxlan_port_idx(pf, port);
4924
4925         /* Check if port already exists */
4926         if (idx >= 0) {
4927                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
4928                 return -EINVAL;
4929         }
4930
4931         /* Now check if there is space to add the new port */
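             /* A zero entry marks a free slot, so search for port number 0. */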
4932         idx = i40e_get_vxlan_port_idx(pf, 0);
4933         if (idx < 0) {
4934                 PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached, "
4935                         "not adding port %d", port);
4936                 return -ENOSPC;
4937         }
4938
4939         ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
4940                                         &filter_idx, NULL);
4941         if (ret < 0) {
4942                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
4943                 return -1;
4944         }
4945
4946         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
4947                         port, filter_idx);
4948
4949         /* New port: add it and mark its index in the bitmap */
4950         pf->vxlan_ports[idx] = port;
4951         pf->vxlan_bitmap |= (1 << idx);
4952
4953         if (!(pf->flags & I40E_FLAG_VXLAN))
4954                 pf->flags |= I40E_FLAG_VXLAN;
4955
4956         return 0;
4957 }
4958
4959 static int
4960 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
4961 {
4962         int idx;
4963         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4964
4965         if (!(pf->flags & I40E_FLAG_VXLAN)) {
4966                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
4967                 return -EINVAL;
4968         }
4969
4970         idx = i40e_get_vxlan_port_idx(pf, port);
4971
4972         if (idx < 0) {
4973                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
4974                 return -EINVAL;
4975         }
4976
4977         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
4978                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
4979                 return -1;
4980         }
4981
4982         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, index %d",
4983                         port, idx);
4984
4985         pf->vxlan_ports[idx] = 0;
4986         pf->vxlan_bitmap &= ~(1 << idx);
4987
4988         if (!pf->vxlan_bitmap)
4989                 pf->flags &= ~I40E_FLAG_VXLAN;
4990
4991         return 0;
4992 }
4993
4994 /* Add UDP tunneling port */
4995 static int
4996 i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
4997                         struct rte_eth_udp_tunnel *udp_tunnel)
4998 {
4999         int ret = 0;
5000         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5001
5002         if (udp_tunnel == NULL)
5003                 return -EINVAL;
5004
5005         switch (udp_tunnel->prot_type) {
5006         case RTE_TUNNEL_TYPE_VXLAN:
5007                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
5008                 break;
5009
5010         case RTE_TUNNEL_TYPE_GENEVE:
5011         case RTE_TUNNEL_TYPE_TEREDO:
5012                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
5013                 ret = -1;
5014                 break;
5015
5016         default:
5017                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
5018                 ret = -1;
5019                 break;
5020         }
5021
5022         return ret;
5023 }
5024
5025 /* Remove UDP tunneling port */
5026 static int
5027 i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
5028                         struct rte_eth_udp_tunnel *udp_tunnel)
5029 {
5030         int ret = 0;
5031         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5032
5033         if (udp_tunnel == NULL)
5034                 return -EINVAL;
5035
5036         switch (udp_tunnel->prot_type) {
5037         case RTE_TUNNEL_TYPE_VXLAN:
5038                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
5039                 break;
5040         case RTE_TUNNEL_TYPE_GENEVE:
5041         case RTE_TUNNEL_TYPE_TEREDO:
5042                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
5043                 ret = -1;
5044                 break;
5045         default:
5046                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
5047                 ret = -1;
5048                 break;
5049         }
5050
5051         return ret;
5052 }
5053
5054 /* Calculate the maximum number of contiguous PF queues that are configured */
5055 static int
5056 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
5057 {
5058         struct rte_eth_dev_data *data = pf->dev_data;
5059         int i, num;
5060         struct i40e_rx_queue *rxq;
5061
5062         num = 0;
5063         for (i = 0; i < pf->lan_nb_qps; i++) {
5064                 rxq = data->rx_queues[i];
5065                 if (rxq && rxq->q_set)
5066                         num++;
5067                 else
5068                         break;
5069         }
5070
5071         return num;
5072 }
5073
5074 /* Configure RSS */
5075 static int
5076 i40e_pf_config_rss(struct i40e_pf *pf)
5077 {
5078         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5079         struct rte_eth_rss_conf rss_conf;
5080         uint32_t i, lut = 0;
5081         uint16_t j, num;
5082
5083         /*
5084          * If both VMDQ and RSS are enabled, not all of the PF queues are
5085          * configured. It's necessary to calculate the actual number in use.
5086          */
5087         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
5088                 num = i40e_pf_calc_configured_queues_num(pf);
5089                 num = i40e_align_floor(num);
5090         } else
5091                 num = i40e_align_floor(pf->dev_data->nb_rx_queues);
5092
5093         PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
5094                         num);
5095
5096         if (num == 0) {
5097                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
5098                 return -ENOTSUP;
5099         }
5100
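             /*
              * Fill the RSS lookup table with queues 0..num-1 in round-robin
              * order; four 8-bit entries are packed per 32-bit HLUT register,
              * so a register write happens on every fourth entry.
              */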
5101         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
5102                 if (j == num)
5103                         j = 0;
5104                 lut = (lut << 8) | (j & ((0x1 <<
5105                         hw->func_caps.rss_table_entry_width) - 1));
5106                 if ((i & 3) == 3)
5107                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
5108         }
5109
5110         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
5111         if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
5112                 i40e_pf_disable_rss(pf);
5113                 return 0;
5114         }
5115         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
5116                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
5117                 /* Random default keys */
5118                 static uint32_t rss_key_default[] = {0x6b793944,
5119                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
5120                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
5121                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
5122
5123                 rss_conf.rss_key = (uint8_t *)rss_key_default;
5124                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
5125                                                         sizeof(uint32_t);
5126         }
5127
5128         return i40e_hw_rss_hash_set(hw, &rss_conf);
5129 }
5130
5131 static int
5132 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
5133                         struct rte_eth_tunnel_filter_conf *filter)
5134 {
5135         if (pf == NULL || filter == NULL) {
5136                 PMD_DRV_LOG(ERR, "Invalid parameter");
5137                 return -EINVAL;
5138         }
5139
5140         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
5141                 PMD_DRV_LOG(ERR, "Invalid queue ID");
5142                 return -EINVAL;
5143         }
5144
5145         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
5146                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
5147                 return -EINVAL;
5148         }
5149
5150         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
5151                 (is_zero_ether_addr(filter->outer_mac))) {
5152                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
5153                 return -EINVAL;
5154         }
5155
5156         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
5157                 (is_zero_ether_addr(filter->inner_mac))) {
5158                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
5159                 return -EINVAL;
5160         }
5161
5162         return 0;
5163 }
5164
5165 static int
5166 i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
5167                         void *arg)
5168 {
5169         struct rte_eth_tunnel_filter_conf *filter;
5170         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5171         int ret = I40E_SUCCESS;
5172
5173         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
5174
5175         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
5176                 return I40E_ERR_PARAM;
5177
5178         switch (filter_op) {
5179         case RTE_ETH_FILTER_NOP:
5180                 if (!(pf->flags & I40E_FLAG_VXLAN))
5181                         ret = I40E_NOT_SUPPORTED;
                     break;
5182         case RTE_ETH_FILTER_ADD:
5183                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
5184                 break;
5185         case RTE_ETH_FILTER_DELETE:
5186                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
5187                 break;
5188         default:
5189                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
5190                 ret = I40E_ERR_PARAM;
5191                 break;
5192         }
5193
5194         return ret;
5195 }
5196
5197 static int
5198 i40e_pf_config_mq_rx(struct i40e_pf *pf)
5199 {
5200         int ret = 0;
5201         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
5202
5203         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
5204                 PMD_INIT_LOG(ERR, "i40e doesn't support DCB yet");
5205                 return -ENOTSUP;
5206         }
5207
5208         /* RSS setup */
5209         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
5210                 ret = i40e_pf_config_rss(pf);
5211         else
5212                 i40e_pf_disable_rss(pf);
5213
5214         return ret;
5215 }
5216
5217 /* Get the symmetric hash enable configurations per port */
5218 static void
5219 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
5220 {
5221         uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
5222
5223         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
5224 }
5225
5226 /* Set the symmetric hash enable configurations per port */
5227 static void
5228 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
5229 {
5230         uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
5231
5232         if (enable > 0) {
5233                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
5234                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
5235                                                         "been enabled");
5236                         return;
5237                 }
5238                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
5239         } else {
5240                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
5241                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
5242                                                         "been disabled");
5243                         return;
5244                 }
5245                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
5246         }
5247         I40E_WRITE_REG(hw, I40E_PRTQF_CTL_0, reg);
5248         I40E_WRITE_FLUSH(hw);
5249 }
5250
5251 /*
5252  * Get global configurations of hash function type and symmetric hash enable
5253  * per flow type (pctype). Note that global configuration means it affects all
5254  * the ports on the same NIC.
5255  */
5256 static int
5257 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
5258                                    struct rte_eth_hash_global_conf *g_cfg)
5259 {
5260         uint32_t reg, mask = I40E_FLOW_TYPES;
5261         uint16_t i;
5262         enum i40e_filter_pctype pctype;
5263
5264         memset(g_cfg, 0, sizeof(*g_cfg));
5265         reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
5266         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
5267                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
5268         else
5269                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
5270         PMD_DRV_LOG(DEBUG, "Hash function is %s",
5271                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
5272
5273         for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
5274                 if (!(mask & (1UL << i)))
5275                         continue;
5276                 mask &= ~(1UL << i);
5277                 /* A set bit indicates the corresponding flow type is supported */
5278                 g_cfg->valid_bit_mask[0] |= (1UL << i);
5279                 pctype = i40e_flowtype_to_pctype(i);
5280                 reg = I40E_READ_REG(hw, I40E_GLQF_HSYM(pctype));
5281                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
5282                         g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
5283         }
5284
5285         return 0;
5286 }
5287
5288 static int
5289 i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
5290 {
5291         uint32_t i;
5292         uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
5293
5294         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
5295                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
5296                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
5297                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
5298                                                 g_cfg->hash_func);
5299                 return -EINVAL;
5300         }
5301
5302         /*
5303          * As i40e supports fewer than 32 flow types, only the first 32 bits
5304          * need to be checked.
5305          */
5306         mask0 = g_cfg->valid_bit_mask[0];
5307         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
5308                 if (i == 0) {
5309                         /* Check if any unsupported flow type configured */
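                             /*
                              * (mask0 | i40e_mask) ^ i40e_mask is nonzero
                              * exactly when mask0 sets a bit outside the
                              * supported I40E_FLOW_TYPES set.
                              */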
5310                         if ((mask0 | i40e_mask) ^ i40e_mask)
5311                                 goto mask_err;
5312                 } else {
5313                         if (g_cfg->valid_bit_mask[i])
5314                                 goto mask_err;
5315                 }
5316         }
5317
5318         return 0;
5319
5320 mask_err:
5321         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
5322
5323         return -EINVAL;
5324 }
5325
5326 /*
5327  * Set global configurations of hash function type and symmetric hash enable
5328  * per flow type (pctype). Note that modifying the global configuration affects
5329  * all the ports on the same NIC.
5330  */
5331 static int
5332 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
5333                                    struct rte_eth_hash_global_conf *g_cfg)
5334 {
5335         int ret;
5336         uint16_t i;
5337         uint32_t reg;
5338         uint32_t mask0 = g_cfg->valid_bit_mask[0];
5339         enum i40e_filter_pctype pctype;
5340
5341         /* Check the input parameters */
5342         ret = i40e_hash_global_config_check(g_cfg);
5343         if (ret < 0)
5344                 return ret;
5345
5346         for (i = 0; mask0 && i < UINT32_BIT; i++) {
5347                 if (!(mask0 & (1UL << i)))
5348                         continue;
5349                 mask0 &= ~(1UL << i);
5350                 pctype = i40e_flowtype_to_pctype(i);
5351                 reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
5352                                 I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
5353                 I40E_WRITE_REG(hw, I40E_GLQF_HSYM(pctype), reg);
5354         }
5355
5356         reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
5357         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
5358                 /* Toeplitz */
5359                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
5360                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
5361                                                                 "Toeplitz");
5362                         goto out;
5363                 }
5364                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
5365         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
5366                 /* Simple XOR */
5367                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
5368                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
5369                                                         "Simple XOR");
5370                         goto out;
5371                 }
5372                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
5373         } else
5374                 /* Use the default, and keep it as it is */
5375                 goto out;
5376
5377         I40E_WRITE_REG(hw, I40E_GLQF_CTL, reg);
5378
5379 out:
5380         I40E_WRITE_FLUSH(hw);
5381
5382         return 0;
5383 }
5384
5385 static int
5386 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
5387 {
5388         int ret = 0;
5389
5390         if (!hw || !info) {
5391                 PMD_DRV_LOG(ERR, "Invalid pointer");
5392                 return -EFAULT;
5393         }
5394
5395         switch (info->info_type) {
5396         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
5397                 i40e_get_symmetric_hash_enable_per_port(hw,
5398                                         &(info->info.enable));
5399                 break;
5400         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
5401                 ret = i40e_get_hash_filter_global_config(hw,
5402                                 &(info->info.global_conf));
5403                 break;
5404         default:
5405                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
5406                                                         info->info_type);
5407                 ret = -EINVAL;
5408                 break;
5409         }
5410
5411         return ret;
5412 }
5413
5414 static int
5415 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
5416 {
5417         int ret = 0;
5418
5419         if (!hw || !info) {
5420                 PMD_DRV_LOG(ERR, "Invalid pointer");
5421                 return -EFAULT;
5422         }
5423
5424         switch (info->info_type) {
5425         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
5426                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
5427                 break;
5428         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
5429                 ret = i40e_set_hash_filter_global_config(hw,
5430                                 &(info->info.global_conf));
5431                 break;
5432         default:
5433                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
5434                                                         info->info_type);
5435                 ret = -EINVAL;
5436                 break;
5437         }
5438
5439         return ret;
5440 }
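
/*
 * Illustrative usage sketch (editorial, not driver code): enabling
 * symmetric hashing on one port only, leaving the global configuration
 * untouched ("port_id" is a placeholder):
 *
 *	struct rte_eth_hash_filter_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
 *	info.info.enable = 1;
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *				RTE_ETH_FILTER_SET, &info);
 */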
5441
5442 /* Operations for hash function */
5443 static int
5444 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
5445                       enum rte_filter_op filter_op,
5446                       void *arg)
5447 {
5448         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5449         int ret = 0;
5450
5451         switch (filter_op) {
5452         case RTE_ETH_FILTER_NOP:
5453                 break;
5454         case RTE_ETH_FILTER_GET:
5455                 ret = i40e_hash_filter_get(hw,
5456                         (struct rte_eth_hash_filter_info *)arg);
5457                 break;
5458         case RTE_ETH_FILTER_SET:
5459                 ret = i40e_hash_filter_set(hw,
5460                         (struct rte_eth_hash_filter_info *)arg);
5461                 break;
5462         default:
5463                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
5464                                                                 filter_op);
5465                 ret = -ENOTSUP;
5466                 break;
5467         }
5468
5469         return ret;
5470 }
5471
/*
 * Configure an ethertype filter, which can direct packets by filtering
 * on MAC address plus ether_type, or on ether_type alone.
 */
5476 static int
5477 i40e_ethertype_filter_set(struct i40e_pf *pf,
5478                         struct rte_eth_ethertype_filter *filter,
5479                         bool add)
5480 {
5481         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5482         struct i40e_control_filter_stats stats;
5483         uint16_t flags = 0;
5484         int ret;
5485
5486         if (filter->queue >= pf->dev_data->nb_rx_queues) {
5487                 PMD_DRV_LOG(ERR, "Invalid queue ID");
5488                 return -EINVAL;
5489         }
        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                filter->ether_type == ETHER_TYPE_IPv6) {
                PMD_DRV_LOG(ERR, "unsupported ether_type (0x%04x) in"
                        " control packet filter.", filter->ether_type);
                return -EINVAL;
        }
        if (filter->ether_type == ETHER_TYPE_VLAN)
                PMD_DRV_LOG(WARNING, "filtering on the VLAN ether_type of"
                        " the first tag is not supported.");
5499
5500         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
5501                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
5502         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
5503                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
5504         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
5505
5506         memset(&stats, 0, sizeof(stats));
5507         ret = i40e_aq_add_rem_control_packet_filter(hw,
5508                         filter->mac_addr.addr_bytes,
5509                         filter->ether_type, flags,
5510                         pf->main_vsi->seid,
5511                         filter->queue, add, &stats, NULL);
5512
        PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
                         " mac_etype_used = %u, etype_used = %u,"
                         " mac_etype_free = %u, etype_free = %u",
                         ret, stats.mac_etype_used, stats.etype_used,
                         stats.mac_etype_free, stats.etype_free);
5518         if (ret < 0)
5519                 return -ENOSYS;
5520         return 0;
5521 }
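
/*
 * Illustrative usage sketch (editorial, not driver code): dropping all
 * ARP frames through the generic ethertype filter API, which ends up in
 * the function above ("port_id" is a placeholder):
 *
 *	struct rte_eth_ethertype_filter filter;
 *
 *	memset(&filter, 0, sizeof(filter));
 *	filter.ether_type = ETHER_TYPE_ARP;
 *	filter.flags = RTE_ETHTYPE_FLAGS_DROP;
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *				RTE_ETH_FILTER_ADD, &filter);
 */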
5522
5523 /*
5524  * Handle operations for ethertype filter.
5525  */
5526 static int
5527 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
5528                                 enum rte_filter_op filter_op,
5529                                 void *arg)
5530 {
5531         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5532         int ret = 0;
5533
5534         if (filter_op == RTE_ETH_FILTER_NOP)
5535                 return ret;
5536
5537         if (arg == NULL) {
5538                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
5539                             filter_op);
5540                 return -EINVAL;
5541         }
5542
5543         switch (filter_op) {
5544         case RTE_ETH_FILTER_ADD:
5545                 ret = i40e_ethertype_filter_set(pf,
5546                         (struct rte_eth_ethertype_filter *)arg,
5547                         TRUE);
5548                 break;
5549         case RTE_ETH_FILTER_DELETE:
5550                 ret = i40e_ethertype_filter_set(pf,
5551                         (struct rte_eth_ethertype_filter *)arg,
5552                         FALSE);
5553                 break;
5554         default:
                PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
5556                 ret = -ENOSYS;
5557                 break;
5558         }
5559         return ret;
5560 }
5561
5562 static int
5563 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
5564                      enum rte_filter_type filter_type,
5565                      enum rte_filter_op filter_op,
5566                      void *arg)
5567 {
5568         int ret = 0;
5569
5570         if (dev == NULL)
5571                 return -EINVAL;
5572
5573         switch (filter_type) {
5574         case RTE_ETH_FILTER_HASH:
5575                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
5576                 break;
5577         case RTE_ETH_FILTER_MACVLAN:
5578                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
5579                 break;
5580         case RTE_ETH_FILTER_ETHERTYPE:
5581                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
5582                 break;
5583         case RTE_ETH_FILTER_TUNNEL:
5584                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
5585                 break;
5586         case RTE_ETH_FILTER_FDIR:
5587                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
5588                 break;
5589         default:
5590                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5591                                                         filter_type);
5592                 ret = -EINVAL;
5593                 break;
5594         }
5595
5596         return ret;
5597 }
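
/*
 * Illustrative usage sketch (editorial, not driver code): before using a
 * filter type, an application can probe whether this dispatcher accepts
 * it; rte_eth_dev_filter_supported() issues RTE_ETH_FILTER_NOP, which the
 * supported branches above handle:
 *
 *	if (rte_eth_dev_filter_supported(port_id,
 *					 RTE_ETH_FILTER_ETHERTYPE) == 0)
 *		configure_ethertype_filters(port_id);
 *
 * where configure_ethertype_filters() is a hypothetical application
 * helper.
 */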
5598
/*
 * Some registers are only reset by a global hardware reset. Hardware
 * initialization is therefore needed to put those registers into an
 * expected initial state.
 */
5604 static void
5605 i40e_hw_init(struct i40e_hw *hw)
5606 {
5607         /* clear the PF Queue Filter control register */
5608         I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0);
5609
5610         /* Disable symmetric hash per port */
5611         i40e_set_symmetric_hash_enable_per_port(hw, 0);
5612 }
5613
5614 enum i40e_filter_pctype
5615 i40e_flowtype_to_pctype(uint16_t flow_type)
5616 {
5617         static const enum i40e_filter_pctype pctype_table[] = {
5618                 [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
5619                 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
5620                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
5621                 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
5622                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
5623                 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
5624                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
5625                 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
5626                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
5627                 [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
5628                 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
5629                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
5630                 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
5631                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
5632                 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
5633                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
5634                 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
5635                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
5636                 [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
5637         };
5638
5639         return pctype_table[flow_type];
5640 }
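
/*
 * Note: the table above is the inverse of the one in
 * i40e_pctype_to_flowtype() below; for any supported flow type ft,
 *
 *	i40e_pctype_to_flowtype(i40e_flowtype_to_pctype(ft)) == ft
 *
 * Unsupported indices map to 0, as both lookup tables are implicitly
 * zero-initialized.
 */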
5641
5642 uint16_t
5643 i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
5644 {
5645         static const uint16_t flowtype_table[] = {
5646                 [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
5647                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
5648                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
5649                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
5650                         RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
5651                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
5652                         RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
5653                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
5654                         RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
5655                 [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
5656                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
5657                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
5658                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
5659                         RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
5660                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
5661                         RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
5662                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
5663                         RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
5664                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
5665         };
5666
5667         return flowtype_table[pctype];
5668 }
5669
/*
 * On X710, performance falls far short of expectations on recent firmware
 * versions. On XL710, the same is true when promiscuous mode is disabled,
 * or when it is enabled and the port MAC address equals the packet's
 * destination MAC address. The fix for this issue may not be integrated
 * into the following firmware version, so a workaround in the software
 * driver is needed: it modifies the initial values of three internal-only
 * registers for both X710 and XL710. Note that the values for X710 and
 * XL710 may differ, and that the workaround can be removed once the issue
 * is fixed in firmware.
 */
5681
5682 /* For both X710 and XL710 */
5683 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
5684 #define I40E_GL_SWR_PRI_JOIN_MAP_0       0x26CE00
5685
5686 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
5687 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
5688
5689 /* For X710 */
5690 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
5691 /* For XL710 */
5692 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
5693 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
5694
5695 static void
5696 i40e_configure_registers(struct i40e_hw *hw)
5697 {
5698         static struct {
5699                 uint32_t addr;
5700                 uint64_t val;
5701         } reg_table[] = {
5702                 {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
5703                 {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
5704                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
5705         };
5706         uint64_t reg;
5707         uint32_t i;
5708         int ret;
5709
5710         for (i = 0; i < RTE_DIM(reg_table); i++) {
5711                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
5712                         if (i40e_is_40G_device(hw->device_id)) /* For XL710 */
5713                                 reg_table[i].val =
5714                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
5715                         else /* For X710 */
5716                                 reg_table[i].val =
5717                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
5718                 }
5719
5720                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
5721                                                         &reg, NULL);
5722                 if (ret < 0) {
5723                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
5724                                                         reg_table[i].addr);
5725                         break;
5726                 }
5727                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
5728                                                 reg_table[i].addr, reg);
5729                 if (reg == reg_table[i].val)
5730                         continue;
5731
5732                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
5733                                                 reg_table[i].val, NULL);
5734                 if (ret < 0) {
5735                         PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
5736                                 "address of 0x%"PRIx32, reg_table[i].val,
5737                                                         reg_table[i].addr);
5738                         break;
5739                 }
5740                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
5741                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
5742         }
5743 }
5744
5745 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
5746 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
5747 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
5748 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
5749 static int
5750 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
5751 {
5752         uint32_t reg;
5753         int ret;
5754
5755         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
5756                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
5757                 return -EINVAL;
5758         }
5759
5760         /* Configure for double VLAN RX stripping */
5761         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
5762         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
5763                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
5764                 ret = i40e_aq_debug_write_register(hw,
5765                                                    I40E_VSI_TSR(vsi->vsi_id),
5766                                                    reg, NULL);
5767                 if (ret < 0) {
5768                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
5769                                     vsi->vsi_id);
5770                         return I40E_ERR_CONFIG;
5771                 }
5772         }
5773
5774         /* Configure for double VLAN TX insertion */
5775         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
5776         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
5777                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
5778                 ret = i40e_aq_debug_write_register(hw,
5779                                                    I40E_VSI_L2TAGSTXVALID(
5780                                                    vsi->vsi_id), reg, NULL);
5781                 if (ret < 0) {
5782                         PMD_DRV_LOG(ERR, "Failed to update "
5783                                 "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
5784                         return I40E_ERR_CONFIG;
5785                 }
5786         }
5787
5788         return 0;
5789 }
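
/*
 * Illustrative usage sketch (editorial, not driver code): double VLAN
 * offload is typically requested from the application side, e.g. via the
 * extended VLAN flag at configure time ("port_id" is a placeholder):
 *
 *	struct rte_eth_conf port_conf = {
 *		.rxmode = {
 *			.hw_vlan_extend = 1,
 *		},
 *	};
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 */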
5790
/**
 * i40e_aq_add_mirror_rule
 * @hw: pointer to the hardware structure
 * @seid: VEB seid to add the mirror rule to
 * @dst_id: destination VSI seid
 * @rule_type: type of the mirror rule
 * @entries: buffer which contains the entities to be mirrored
 * @count: number of entities contained in the buffer
 * @rule_id: the rule_id of the rule which was added
 *
 * Add a mirror rule for a given VEB.
 *
 **/
5803 static enum i40e_status_code
5804 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
5805                         uint16_t seid, uint16_t dst_id,
5806                         uint16_t rule_type, uint16_t *entries,
5807                         uint16_t count, uint16_t *rule_id)
5808 {
5809         struct i40e_aq_desc desc;
5810         struct i40e_aqc_add_delete_mirror_rule cmd;
5811         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
5812                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
5813                 &desc.params.raw;
5814         uint16_t buff_len;
5815         enum i40e_status_code status;
5816
5817         i40e_fill_default_direct_cmd_desc(&desc,
5818                                           i40e_aqc_opc_add_mirror_rule);
5819         memset(&cmd, 0, sizeof(cmd));
5820
5821         buff_len = sizeof(uint16_t) * count;
5822         desc.datalen = rte_cpu_to_le_16(buff_len);
5823         if (buff_len > 0)
5824                 desc.flags |= rte_cpu_to_le_16(
5825                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5826         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
5827                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
5828         cmd.num_entries = rte_cpu_to_le_16(count);
5829         cmd.seid = rte_cpu_to_le_16(seid);
5830         cmd.destination = rte_cpu_to_le_16(dst_id);
5831
5832         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
5833         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
        PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
                         " rule_id = %u, mirror_rules_used = %u,"
                         " mirror_rules_free = %u",
                         hw->aq.asq_last_status, resp->rule_id,
                         resp->mirror_rules_used, resp->mirror_rules_free);
        *rule_id = rte_le_to_cpu_16(resp->rule_id);
5840
5841         return status;
5842 }
5843
/**
 * i40e_aq_del_mirror_rule
 * @hw: pointer to the hardware structure
 * @seid: VEB seid to delete the mirror rule from
 * @rule_type: type of the mirror rule
 * @entries: buffer which contains the entities to be mirrored
 * @count: number of entities contained in the buffer
 * @rule_id: the rule_id of the rule to be deleted
 *
 * Delete a mirror rule for a given VEB.
 *
 **/
5855 static enum i40e_status_code
5856 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
5857                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
5858                 uint16_t count, uint16_t rule_id)
5859 {
5860         struct i40e_aq_desc desc;
5861         struct i40e_aqc_add_delete_mirror_rule cmd;
5862         uint16_t buff_len = 0;
5863         enum i40e_status_code status;
5864         void *buff = NULL;
5865
5866         i40e_fill_default_direct_cmd_desc(&desc,
5867                                           i40e_aqc_opc_delete_mirror_rule);
5868         memset(&cmd, 0, sizeof(cmd));
5869         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
5870                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
5871                                                           I40E_AQ_FLAG_RD));
                cmd.num_entries = rte_cpu_to_le_16(count);
5873                 buff_len = sizeof(uint16_t) * count;
5874                 desc.datalen = rte_cpu_to_le_16(buff_len);
5875                 buff = (void *)entries;
        } else
                /* For deletion, the rule_id is passed in the destination
                 * field of the AQ command.
                 */
                cmd.destination = rte_cpu_to_le_16(rule_id);
5879
5880         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
5881                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
5882         cmd.seid = rte_cpu_to_le_16(seid);
5883
5884         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
5885         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
5886
5887         return status;
5888 }
5889
/**
 * i40e_mirror_rule_set
 * @dev: pointer to the device structure
 * @mirror_conf: mirror rule info
 * @sw_id: mirror rule's sw_id
 * @on: enable/disable
 *
 * Set a mirror rule.
 *
 **/
5900 static int
5901 i40e_mirror_rule_set(struct rte_eth_dev *dev,
5902                         struct rte_eth_mirror_conf *mirror_conf,
5903                         uint8_t sw_id, uint8_t on)
5904 {
5905         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5906         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5907         struct i40e_mirror_rule *it, *mirr_rule = NULL;
5908         struct i40e_mirror_rule *parent = NULL;
5909         uint16_t seid, dst_seid, rule_id;
5910         uint16_t i, j = 0;
5911         int ret;
5912
5913         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
5914
5915         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
                PMD_DRV_LOG(ERR, "mirror rule cannot be configured"
                        " without a VEB or VFs.");
5918                 return -ENOSYS;
5919         }
        if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
5921                 PMD_DRV_LOG(ERR, "mirror table is full.");
5922                 return -ENOSPC;
5923         }
5924         if (mirror_conf->dst_pool > pf->vf_num) {
5925                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
5926                                  mirror_conf->dst_pool);
5927                 return -EINVAL;
5928         }
5929
5930         seid = pf->main_vsi->veb->seid;
5931
5932         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
5933                 if (sw_id <= it->index) {
5934                         mirr_rule = it;
5935                         break;
5936                 }
5937                 parent = it;
5938         }
5939         if (mirr_rule && sw_id == mirr_rule->index) {
5940                 if (on) {
5941                         PMD_DRV_LOG(ERR, "mirror rule exists.");
5942                         return -EEXIST;
5943                 } else {
5944                         ret = i40e_aq_del_mirror_rule(hw, seid,
5945                                         mirr_rule->rule_type,
5946                                         mirr_rule->entries,
5947                                         mirr_rule->num_entries, mirr_rule->id);
5948                         if (ret < 0) {
5949                                 PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
5950                                                    " ret = %d, aq_err = %d.",
5951                                                    ret, hw->aq.asq_last_status);
5952                                 return -ENOSYS;
5953                         }
5954                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
5955                         rte_free(mirr_rule);
5956                         pf->nb_mirror_rule--;
5957                         return 0;
5958                 }
5959         } else if (!on) {
5960                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
5961                 return -ENOENT;
5962         }
5963
        mirr_rule = rte_zmalloc("i40e_mirror_rule",
                                sizeof(struct i40e_mirror_rule), 0);
5966         if (!mirr_rule) {
5967                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5968                 return I40E_ERR_NO_MEMORY;
5969         }
5970         switch (mirror_conf->rule_type) {
5971         case ETH_MIRROR_VLAN:
5972                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
5973                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
5974                                 mirr_rule->entries[j] =
5975                                         mirror_conf->vlan.vlan_id[i];
5976                                 j++;
5977                         }
5978                 }
5979                 if (j == 0) {
5980                         PMD_DRV_LOG(ERR, "vlan is not specified.");
5981                         rte_free(mirr_rule);
5982                         return -EINVAL;
5983                 }
5984                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
5985                 break;
5986         case ETH_MIRROR_VIRTUAL_POOL_UP:
5987         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
                /* check if any specified pool bit is out of range */
                if (mirror_conf->pool_mask >= (1ULL << (pf->vf_num + 1))) {
5990                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
5991                         rte_free(mirr_rule);
5992                         return -EINVAL;
5993                 }
5994                 for (i = 0, j = 0; i < pf->vf_num; i++) {
5995                         if (mirror_conf->pool_mask & (1ULL << i)) {
5996                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
5997                                 j++;
5998                         }
5999                 }
6000                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
6001                         /* add pf vsi to entries */
6002                         mirr_rule->entries[j] = pf->main_vsi_seid;
6003                         j++;
6004                 }
6005                 if (j == 0) {
6006                         PMD_DRV_LOG(ERR, "pool is not specified.");
6007                         rte_free(mirr_rule);
6008                         return -EINVAL;
6009                 }
                /*
                 * In the AQ commands, egress/ingress is relative to the
                 * switch, not to the port.
                 */
                mirr_rule->rule_type =
                        (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
                        I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
                        I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
6015                 break;
        case ETH_MIRROR_UPLINK_PORT:
                /* Egress/ingress is relative to the switch, not the port. */
                mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
6019                 break;
6020         case ETH_MIRROR_DOWNLINK_PORT:
6021                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
6022                 break;
6023         default:
6024                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
6025                         mirror_conf->rule_type);
6026                 rte_free(mirr_rule);
6027                 return -EINVAL;
6028         }
6029
6030         /* If the dst_pool is equal to vf_num, consider it as PF */
6031         if (mirror_conf->dst_pool == pf->vf_num)
6032                 dst_seid = pf->main_vsi_seid;
6033         else
6034                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
6035
6036         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
6037                                       mirr_rule->rule_type, mirr_rule->entries,
6038                                       j, &rule_id);
6039         if (ret < 0) {
6040                 PMD_DRV_LOG(ERR, "failed to add mirror rule:"
6041                                    " ret = %d, aq_err = %d.",
6042                                    ret, hw->aq.asq_last_status);
6043                 rte_free(mirr_rule);
6044                 return -ENOSYS;
6045         }
6046
6047         mirr_rule->index = sw_id;
6048         mirr_rule->num_entries = j;
6049         mirr_rule->id = rule_id;
6050         mirr_rule->dst_vsi_seid = dst_seid;
6051
6052         if (parent)
6053                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
6054         else
6055                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
6056
6057         pf->nb_mirror_rule++;
6058         return 0;
6059 }
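
/*
 * Illustrative usage sketch (editorial, not driver code): mirroring all
 * traffic tagged with VLAN 100 to VF pool 1 through the generic ethdev
 * API, which lands in the function above ("port_id" and the rule's sw_id
 * of 0 are placeholders):
 *
 *	struct rte_eth_mirror_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.rule_type = ETH_MIRROR_VLAN;
 *	conf.vlan.vlan_mask = 1ULL << 0;
 *	conf.vlan.vlan_id[0] = 100;
 *	conf.dst_pool = 1;
 *	rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
 */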
6060
6061 /**
6062  * i40e_mirror_rule_reset
6063  * @dev: pointer to the device
6064  * @sw_id: mirror rule's sw_id
6065  *
 * Reset a mirror rule.
6067  *
6068  **/
6069 static int
6070 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
6071 {
6072         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6073         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6074         struct i40e_mirror_rule *it, *mirr_rule = NULL;
6075         uint16_t seid;
6076         int ret;
6077
6078         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
6079
        if (pf->main_vsi->veb == NULL) {
                PMD_DRV_LOG(ERR, "no VEB, no mirror rule to reset.");
                return -ENOSYS;
        }
        seid = pf->main_vsi->veb->seid;
6081
6082         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
6083                 if (sw_id == it->index) {
6084                         mirr_rule = it;
6085                         break;
6086                 }
6087         }
6088         if (mirr_rule) {
6089                 ret = i40e_aq_del_mirror_rule(hw, seid,
6090                                 mirr_rule->rule_type,
6091                                 mirr_rule->entries,
6092                                 mirr_rule->num_entries, mirr_rule->id);
6093                 if (ret < 0) {
6094                         PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
6095                                            " status = %d, aq_err = %d.",
6096                                            ret, hw->aq.asq_last_status);
6097                         return -ENOSYS;
6098                 }
6099                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
6100                 rte_free(mirr_rule);
6101                 pf->nb_mirror_rule--;
6102         } else {
6103                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
6104                 return -ENOENT;
6105         }
6106         return 0;
6107 }
6108
6109 static int
6110 i40e_timesync_enable(struct rte_eth_dev *dev)
6111 {
6112         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6113         struct rte_eth_link *link = &dev->data->dev_link;
6114         uint32_t tsync_ctl_l;
6115         uint32_t tsync_ctl_h;
6116         uint32_t tsync_inc_l;
6117         uint32_t tsync_inc_h;
6118
6119         switch (link->link_speed) {
6120         case ETH_LINK_SPEED_40G:
6121                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
6122                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
6123                 break;
6124         case ETH_LINK_SPEED_10G:
6125                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
6126                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
6127                 break;
6128         case ETH_LINK_SPEED_1000:
6129                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
6130                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
6131                 break;
6132         default:
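                /* Unsupported link speed: leave the increment at zero so
                 * the timesync counter does not advance.
                 */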
6133                 tsync_inc_l = 0x0;
6134                 tsync_inc_h = 0x0;
6135         }
6136
        /* Clear stale timesync latches; reading the high registers
         * releases any latched timestamps.
         */
        I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
        I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
        I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
        I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
        I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
        I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
6145
6146         /* Set the timesync increment value. */
6147         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
6148         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
6149
6150         /* Enable timestamping of PTP packets. */
6151         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
6152         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
6153
6154         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
6155         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
6156         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
6157
6158         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
6159         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
6160
6161         return 0;
6162 }
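
/*
 * Illustrative usage sketch (editorial, not driver code): application
 * side of the timesync ops implemented here ("port_id", "mbuf" and "idx"
 * are placeholders; "idx" selects one of the four RX latch registers):
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	...
 *	if (mbuf->ol_flags & PKT_RX_IEEE1588_TMST)
 *		rte_eth_timesync_read_rx_timestamp(port_id, &ts, idx);
 */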
6163
6164 static int
6165 i40e_timesync_disable(struct rte_eth_dev *dev)
6166 {
6167         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6168         uint32_t tsync_ctl_l;
6169         uint32_t tsync_ctl_h;
6170
        /* Disable timestamping of transmitted and received PTP packets. */
6172         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
6173         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
6174
6175         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
6176         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
6177
6178         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
6179         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
6180
        /* Reset the timesync increment value. */
6182         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
6183         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
6184
6185         return 0;
6186 }
6187
6188 static int
6189 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
6190                                 struct timespec *timestamp, uint32_t flags)
6191 {
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t sync_status;
        uint32_t rx_stmpl;
        uint32_t rx_stmph;
        uint64_t ns;
        uint32_t index = flags & 0x03;
6197
6198         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
6199         if ((sync_status & (1 << index)) == 0)
6200                 return -EINVAL;
6201
6202         rx_stmpl = I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
6203         rx_stmph = I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index));
6204
        /* The latched value is a free-running count in nanoseconds. */
        ns = ((uint64_t)rx_stmph << 32) | rx_stmpl;
        timestamp->tv_sec = ns / 1000000000ULL;
        timestamp->tv_nsec = ns % 1000000000ULL;

        return 0;
6209 }
6210
6211 static int
6212 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
6213                                 struct timespec *timestamp)
6214 {
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t sync_status;
        uint32_t tx_stmpl;
        uint32_t tx_stmph;
        uint64_t ns;
6219
6220         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
6221         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
6222                 return -EINVAL;
6223
6224         tx_stmpl = I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
6225         tx_stmph = I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
6226
        /* The latched value is a free-running count in nanoseconds. */
        ns = ((uint64_t)tx_stmph << 32) | tx_stmpl;
        timestamp->tv_sec = ns / 1000000000ULL;
        timestamp->tv_nsec = ns % 1000000000ULL;

        return 0;
6231 }
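
/*
 * Illustrative usage sketch (editorial, not driver code): retrieving the
 * timestamp of a transmitted PTP packet; the mbuf must be flagged before
 * sending, and the read is polled until the TX latch fills:
 *
 *	mbuf->ol_flags |= PKT_TX_IEEE1588_TMST;
 *	rte_eth_tx_burst(port_id, 0, &mbuf, 1);
 *	while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) < 0)
 *		rte_delay_us(10);
 */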