i40e: fix PF interrupt handler
[dpdk.git] / lib / librte_pmd_i40e / i40e_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42
43 #include <rte_string_fns.h>
44 #include <rte_pci.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
50 #include <rte_alarm.h>
51 #include <rte_dev.h>
52 #include <rte_eth_ctrl.h>
53
54 #include "i40e_logs.h"
55 #include "i40e/i40e_prototype.h"
56 #include "i40e/i40e_adminq_cmd.h"
57 #include "i40e/i40e_type.h"
58 #include "i40e_ethdev.h"
59 #include "i40e_rxtx.h"
60 #include "i40e_pf.h"
61
62 #define I40E_DEFAULT_RX_FREE_THRESH  32
63 #define I40E_DEFAULT_RX_PTHRESH      8
64 #define I40E_DEFAULT_RX_HTHRESH      8
65 #define I40E_DEFAULT_RX_WTHRESH      0
66
67 #define I40E_DEFAULT_TX_FREE_THRESH  32
68 #define I40E_DEFAULT_TX_PTHRESH      32
69 #define I40E_DEFAULT_TX_HTHRESH      0
70 #define I40E_DEFAULT_TX_WTHRESH      0
71 #define I40E_DEFAULT_TX_RSBIT_THRESH 32
72
73 /* Maximum number of MAC addresses */
74 #define I40E_NUM_MACADDR_MAX       64
75 #define I40E_CLEAR_PXE_WAIT_MS     200
76
77 /* Maximum number of capability elements */
78 #define I40E_MAX_CAP_ELE_NUM       128
79
80 /* Wait count and interval */
81 #define I40E_CHK_Q_ENA_COUNT       1000
82 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
83
84 /* Maximum number of VSI */
85 #define I40E_MAX_NUM_VSIS          (384UL)
86
87 /* Bit shift and mask */
88 #define I40E_16_BIT_SHIFT 16
89 #define I40E_16_BIT_MASK  0xFFFF
90 #define I40E_32_BIT_SHIFT 32
91 #define I40E_32_BIT_MASK  0xFFFFFFFF
92 #define I40E_48_BIT_SHIFT 48
93 #define I40E_48_BIT_MASK  0xFFFFFFFFFFFFULL
94
95 /* Default queue interrupt throttling time in microseconds*/
96 #define I40E_ITR_INDEX_DEFAULT          0
97 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
98 #define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
99
100 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
101
102 /* Mask of PF interrupt causes */
103 #define I40E_PFINT_ICR0_ENA_MASK ( \
104                 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
105                 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
106                 I40E_PFINT_ICR0_ENA_GRST_MASK | \
107                 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
108                 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
109                 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
110                 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
111                 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
112                 I40E_PFINT_ICR0_ENA_VFLR_MASK | \
113                 I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
114
115 static int eth_i40e_dev_init(\
116                         __attribute__((unused)) struct eth_driver *eth_drv,
117                         struct rte_eth_dev *eth_dev);
118 static int i40e_dev_configure(struct rte_eth_dev *dev);
119 static int i40e_dev_start(struct rte_eth_dev *dev);
120 static void i40e_dev_stop(struct rte_eth_dev *dev);
121 static void i40e_dev_close(struct rte_eth_dev *dev);
122 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
123 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
124 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
125 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
126 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
127 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
128 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
129                                struct rte_eth_stats *stats);
130 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
131 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
132                                             uint16_t queue_id,
133                                             uint8_t stat_idx,
134                                             uint8_t is_rx);
135 static void i40e_dev_info_get(struct rte_eth_dev *dev,
136                               struct rte_eth_dev_info *dev_info);
137 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
138                                 uint16_t vlan_id,
139                                 int on);
140 static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
141 static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
142 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
143                                       uint16_t queue,
144                                       int on);
145 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
146 static int i40e_dev_led_on(struct rte_eth_dev *dev);
147 static int i40e_dev_led_off(struct rte_eth_dev *dev);
148 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
149                               struct rte_eth_fc_conf *fc_conf);
150 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
151                                        struct rte_eth_pfc_conf *pfc_conf);
152 static void i40e_macaddr_add(struct rte_eth_dev *dev,
153                           struct ether_addr *mac_addr,
154                           uint32_t index,
155                           uint32_t pool);
156 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
157 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
158                                     struct rte_eth_rss_reta *reta_conf);
159 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
160                                    struct rte_eth_rss_reta *reta_conf);
161
162 static int i40e_get_cap(struct i40e_hw *hw);
163 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
164 static int i40e_pf_setup(struct i40e_pf *pf);
165 static int i40e_vsi_init(struct i40e_vsi *vsi);
166 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
167                 bool offset_loaded, uint64_t *offset, uint64_t *stat);
168 static void i40e_stat_update_48(struct i40e_hw *hw,
169                                uint32_t hireg,
170                                uint32_t loreg,
171                                bool offset_loaded,
172                                uint64_t *offset,
173                                uint64_t *stat);
174 static void i40e_pf_config_irq0(struct i40e_hw *hw);
175 static void i40e_dev_interrupt_handler(
176                 __rte_unused struct rte_intr_handle *handle, void *param);
177 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
178                                 uint32_t base, uint32_t num);
179 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
180 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
181                         uint32_t base);
182 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
183                         uint16_t num);
184 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
185 static int i40e_veb_release(struct i40e_veb *veb);
186 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
187                                                 struct i40e_vsi *vsi);
188 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
189 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
190 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
191                                              struct i40e_macvlan_filter *mv_f,
192                                              int num,
193                                              struct ether_addr *addr);
194 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
195                                              struct i40e_macvlan_filter *mv_f,
196                                              int num,
197                                              uint16_t vlan);
198 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
199 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
200                                     struct rte_eth_rss_conf *rss_conf);
201 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
202                                       struct rte_eth_rss_conf *rss_conf);
203 static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
204                                 struct rte_eth_udp_tunnel *udp_tunnel);
205 static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
206                                 struct rte_eth_udp_tunnel *udp_tunnel);
207 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
208                                 enum rte_filter_type filter_type,
209                                 enum rte_filter_op filter_op,
210                                 void *arg);
211
/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];

/* PCI IDs claimed by this driver, expanded from rte_pci_dev_ids.h */
static struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};
220
/*
 * Ethdev callback table for i40e PF ports; installed on each device in
 * eth_i40e_dev_init.  Entries not listed here remain unsupported.
 */
static struct eth_dev_ops i40e_eth_dev_ops = {
        .dev_configure                = i40e_dev_configure,
        .dev_start                    = i40e_dev_start,
        .dev_stop                     = i40e_dev_stop,
        .dev_close                    = i40e_dev_close,
        .promiscuous_enable           = i40e_dev_promiscuous_enable,
        .promiscuous_disable          = i40e_dev_promiscuous_disable,
        .allmulticast_enable          = i40e_dev_allmulticast_enable,
        .allmulticast_disable         = i40e_dev_allmulticast_disable,
        .dev_set_link_up              = i40e_dev_set_link_up,
        .dev_set_link_down            = i40e_dev_set_link_down,
        .link_update                  = i40e_dev_link_update,
        .stats_get                    = i40e_dev_stats_get,
        .stats_reset                  = i40e_dev_stats_reset,
        .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
        .dev_infos_get                = i40e_dev_info_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
        .vlan_tpid_set                = i40e_vlan_tpid_set,
        .vlan_offload_set             = i40e_vlan_offload_set,
        .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
        .vlan_pvid_set                = i40e_vlan_pvid_set,
        .rx_queue_start               = i40e_dev_rx_queue_start,
        .rx_queue_stop                = i40e_dev_rx_queue_stop,
        .tx_queue_start               = i40e_dev_tx_queue_start,
        .tx_queue_stop                = i40e_dev_tx_queue_stop,
        .rx_queue_setup               = i40e_dev_rx_queue_setup,
        .rx_queue_release             = i40e_dev_rx_queue_release,
        .rx_queue_count               = i40e_dev_rx_queue_count,
        .rx_descriptor_done           = i40e_dev_rx_descriptor_done,
        .tx_queue_setup               = i40e_dev_tx_queue_setup,
        .tx_queue_release             = i40e_dev_tx_queue_release,
        .dev_led_on                   = i40e_dev_led_on,
        .dev_led_off                  = i40e_dev_led_off,
        .flow_ctrl_set                = i40e_flow_ctrl_set,
        .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
        .mac_addr_add                 = i40e_macaddr_add,
        .mac_addr_remove              = i40e_macaddr_remove,
        .reta_update                  = i40e_dev_rss_reta_update,
        .reta_query                   = i40e_dev_rss_reta_query,
        .rss_hash_update              = i40e_dev_rss_hash_update,
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_add               = i40e_dev_udp_tunnel_add,
        .udp_tunnel_del               = i40e_dev_udp_tunnel_del,
        .filter_ctrl                  = i40e_dev_filter_ctrl,
};
266
/*
 * PCI ethernet driver descriptor: eth_i40e_dev_init runs once per
 * matched device.  RTE_PCI_DRV_INTR_LSC advertises link-state-change
 * interrupt support to the ethdev layer.
 */
static struct eth_driver rte_i40e_pmd = {
        {
                .name = "rte_i40e_pmd",
                .id_table = pci_id_i40e_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        },
        .eth_dev_init = eth_i40e_dev_init,
        .dev_private_size = sizeof(struct i40e_adapter),
};
276
/*
 * Return the largest power of two that is less than or equal to n
 * (e.g. 5 -> 4, 8 -> 8, 9 -> 8).
 *
 * Non-positive inputs are returned unchanged.  The previous
 * bit-smearing implementation right-shifted a negative intermediate
 * for n <= 0, which is implementation-defined behavior in C; the
 * explicit guard plus doubling loop avoids that while keeping the
 * result for n == 0 (returns 0) and all positive n identical.
 */
static inline int
i40e_prev_power_of_2(int n)
{
        int p;

        if (n <= 0)
                return n;

        /* Double p while one more doubling still fits under n */
        for (p = 1; p <= n / 2; p <<= 1)
                ;

        return p;
}
294
295 static inline int
296 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
297                                      struct rte_eth_link *link)
298 {
299         struct rte_eth_link *dst = link;
300         struct rte_eth_link *src = &(dev->data->dev_link);
301
302         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
303                                         *(uint64_t *)src) == 0)
304                 return -1;
305
306         return 0;
307 }
308
309 static inline int
310 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
311                                       struct rte_eth_link *link)
312 {
313         struct rte_eth_link *dst = &(dev->data->dev_link);
314         struct rte_eth_link *src = link;
315
316         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
317                                         *(uint64_t *)src) == 0)
318                 return -1;
319
320         return 0;
321 }
322
323 /*
324  * Driver initialization routine.
325  * Invoked once at EAL init time.
326  * Register itself as the [Poll Mode] Driver of PCI i40e devices.
327  */
/*
 * EAL driver-init hook: registers the rte_i40e_pmd eth driver with the
 * ethdev layer.  The name/params arguments are part of the rte_driver
 * init signature but unused here.  Always returns 0.
 */
static int
rte_i40e_pmd_init(const char *name __rte_unused,
                  const char *params __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        rte_eth_driver_register(&rte_i40e_pmd);

        return 0;
}
337
/* Driver descriptor hooking rte_i40e_pmd_init into EAL device init. */
static struct rte_driver rte_i40e_driver = {
        .type = PMD_PDEV,
        .init = rte_i40e_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40e_driver);
344
345 static int
346 eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
347                   struct rte_eth_dev *dev)
348 {
349         struct rte_pci_device *pci_dev;
350         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
351         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
352         struct i40e_vsi *vsi;
353         int ret;
354         uint32_t len;
355         uint8_t aq_fail = 0;
356
357         PMD_INIT_FUNC_TRACE();
358
359         dev->dev_ops = &i40e_eth_dev_ops;
360         dev->rx_pkt_burst = i40e_recv_pkts;
361         dev->tx_pkt_burst = i40e_xmit_pkts;
362
363         /* for secondary processes, we don't initialise any further as primary
364          * has already done this work. Only check we don't need a different
365          * RX function */
366         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
367                 if (dev->data->scattered_rx)
368                         dev->rx_pkt_burst = i40e_recv_scattered_pkts;
369                 return 0;
370         }
371         pci_dev = dev->pci_dev;
372         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
373         pf->adapter->eth_dev = dev;
374         pf->dev_data = dev->data;
375
376         hw->back = I40E_PF_TO_ADAPTER(pf);
377         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
378         if (!hw->hw_addr) {
379                 PMD_INIT_LOG(ERR, "Hardware is not available, "
380                              "as address is NULL");
381                 return -ENODEV;
382         }
383
384         hw->vendor_id = pci_dev->id.vendor_id;
385         hw->device_id = pci_dev->id.device_id;
386         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
387         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
388         hw->bus.device = pci_dev->addr.devid;
389         hw->bus.func = pci_dev->addr.function;
390
391         /* Make sure all is clean before doing PF reset */
392         i40e_clear_hw(hw);
393
394         /* Reset here to make sure all is clean for each PF */
395         ret = i40e_pf_reset(hw);
396         if (ret) {
397                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
398                 return ret;
399         }
400
401         /* Initialize the shared code (base driver) */
402         ret = i40e_init_shared_code(hw);
403         if (ret) {
404                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
405                 return ret;
406         }
407
408         /* Initialize the parameters for adminq */
409         i40e_init_adminq_parameter(hw);
410         ret = i40e_init_adminq(hw);
411         if (ret != I40E_SUCCESS) {
412                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
413                 return -EIO;
414         }
415         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
416                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
417                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
418                      ((hw->nvm.version >> 12) & 0xf),
419                      ((hw->nvm.version >> 4) & 0xff),
420                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
421
422         /* Disable LLDP */
423         ret = i40e_aq_stop_lldp(hw, true, NULL);
424         if (ret != I40E_SUCCESS) /* Its failure can be ignored */
425                 PMD_INIT_LOG(INFO, "Failed to stop lldp");
426
427         /* Clear PXE mode */
428         i40e_clear_pxe_mode(hw);
429
430         /* Get hw capabilities */
431         ret = i40e_get_cap(hw);
432         if (ret != I40E_SUCCESS) {
433                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
434                 goto err_get_capabilities;
435         }
436
437         /* Initialize parameters for PF */
438         ret = i40e_pf_parameter_init(dev);
439         if (ret != 0) {
440                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
441                 goto err_parameter_init;
442         }
443
444         /* Initialize the queue management */
445         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
446         if (ret < 0) {
447                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
448                 goto err_qp_pool_init;
449         }
450         ret = i40e_res_pool_init(&pf->msix_pool, 1,
451                                 hw->func_caps.num_msix_vectors - 1);
452         if (ret < 0) {
453                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
454                 goto err_msix_pool_init;
455         }
456
457         /* Initialize lan hmc */
458         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
459                                 hw->func_caps.num_rx_qp, 0, 0);
460         if (ret != I40E_SUCCESS) {
461                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
462                 goto err_init_lan_hmc;
463         }
464
465         /* Configure lan hmc */
466         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
467         if (ret != I40E_SUCCESS) {
468                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
469                 goto err_configure_lan_hmc;
470         }
471
472         /* Get and check the mac address */
473         i40e_get_mac_addr(hw, hw->mac.addr);
474         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
475                 PMD_INIT_LOG(ERR, "mac address is not valid");
476                 ret = -EIO;
477                 goto err_get_mac_addr;
478         }
479         /* Copy the permanent MAC address */
480         ether_addr_copy((struct ether_addr *) hw->mac.addr,
481                         (struct ether_addr *) hw->mac.perm_addr);
482
483         /* Disable flow control */
484         hw->fc.requested_mode = I40E_FC_NONE;
485         i40e_set_fc(hw, &aq_fail, TRUE);
486
487         /* PF setup, which includes VSI setup */
488         ret = i40e_pf_setup(pf);
489         if (ret) {
490                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
491                 goto err_setup_pf_switch;
492         }
493
494         vsi = pf->main_vsi;
495
496         /* Disable double vlan by default */
497         i40e_vsi_config_double_vlan(vsi, FALSE);
498
499         if (!vsi->max_macaddrs)
500                 len = ETHER_ADDR_LEN;
501         else
502                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
503
504         /* Should be after VSI initialized */
505         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
506         if (!dev->data->mac_addrs) {
507                 PMD_INIT_LOG(ERR, "Failed to allocated memory "
508                                         "for storing mac address");
509                 goto err_get_mac_addr;
510         }
511         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
512                                         &dev->data->mac_addrs[0]);
513
514         /* initialize pf host driver to setup SRIOV resource if applicable */
515         i40e_pf_host_init(dev);
516
517         /* register callback func to eal lib */
518         rte_intr_callback_register(&(pci_dev->intr_handle),
519                 i40e_dev_interrupt_handler, (void *)dev);
520
521         /* configure and enable device interrupt */
522         i40e_pf_config_irq0(hw);
523         i40e_pf_enable_irq0(hw);
524
525         /* enable uio intr after callback register */
526         rte_intr_enable(&(pci_dev->intr_handle));
527
528         return 0;
529
530 err_setup_pf_switch:
531         rte_free(pf->main_vsi);
532 err_get_mac_addr:
533 err_configure_lan_hmc:
534         (void)i40e_shutdown_lan_hmc(hw);
535 err_init_lan_hmc:
536         i40e_res_pool_destroy(&pf->msix_pool);
537 err_msix_pool_init:
538         i40e_res_pool_destroy(&pf->qp_pool);
539 err_qp_pool_init:
540 err_parameter_init:
541 err_get_capabilities:
542         (void)i40e_shutdown_adminq(hw);
543
544         return ret;
545 }
546
/*
 * dev_configure hook: delegates to i40e_dev_init_vlan, which applies
 * the VLAN settings from dev->data->dev_conf (presumably offload and
 * filter setup — see its definition later in this file).
 */
static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
        return i40e_dev_init_vlan(dev);
}
552
/*
 * Detach all of a VSI's queues from their MSIX vector: zero every
 * queue's RX/TX interrupt-cause control register, then clear the
 * vector's queue linked-list head (PF or VF register set depending on
 * the VSI type).  Mirrors i40e_vsi_queues_bind_intr.
 */
void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
        uint16_t i;

        /* Clear cause-control of each queue pair; fence after each pair */
        for (i = 0; i < vsi->nb_qps; i++) {
                I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
                I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
                rte_wmb();
        }

        if (vsi->type != I40E_VSI_SRIOV) {
                /* PF-owned vector: clear its list head and ITR register */
                I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
                I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
                                msix_vect - 1), 0);
        } else {
                uint32_t reg;
                /* VF vector index; same formula as the bind path, where
                 * num_msix_vectors_vf is noted to include irq0 */
                reg = (hw->func_caps.num_msix_vectors_vf - 1) *
                        vsi->user_param + (msix_vect - 1);

                I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
        }
        I40E_WRITE_FLUSH(hw);
}
579
580 static inline uint16_t
581 i40e_calc_itr_interval(int16_t interval)
582 {
583         if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
584                 interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
585
586         /* Convert to hardware count, as writing each 1 represents 2 us */
587         return (interval/2);
588 }
589
/*
 * Attach all of a VSI's RX queues to its allocated MSIX vector by
 * chaining them through the QINT_RQCTL next-queue fields, then program
 * the vector's linked-list head register (PF or VF register set,
 * depending on the VSI type).  TX cause registers are cleared first.
 */
void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
        uint32_t val;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
        int i;

        /* Clear TX cause-control for every queue of this VSI */
        for (i = 0; i < vsi->nb_qps; i++)
                I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);

        /* Bind all RX queues to allocated MSIX interrupt */
        for (i = 0; i < vsi->nb_qps; i++) {
                /* vector index, ITR index, pointer to the next queue in
                 * the chain, and cause-enable */
                val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
                        I40E_QINT_RQCTL_ITR_INDX_MASK |
                        ((vsi->base_queue + i + 1) <<
                        I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
                        (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
                        I40E_QINT_RQCTL_CAUSE_ENA_MASK;

                /* Last queue terminates the chain */
                if (i == vsi->nb_qps - 1)
                        val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
                I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
        }

        /* Write first RX queue to Link list register as the head element */
        if (vsi->type != I40E_VSI_SRIOV) {
                uint16_t interval =
                        i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);

                I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
                                                (vsi->base_queue <<
                                I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
                        (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));

                I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
                                                msix_vect - 1), interval);

/* Local fallbacks in case the base-driver headers lack these */
#ifndef I40E_GLINT_CTL
#define I40E_GLINT_CTL                     0x0003F800
#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
#endif
                /* Disable auto-mask on enabling of all non-zero interrupts */
                I40E_WRITE_REG(hw, I40E_GLINT_CTL,
                        I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
        } else {
                uint32_t reg;

                /* num_msix_vectors_vf needs to minus irq0 */
                reg = (hw->func_caps.num_msix_vectors_vf - 1) *
                        vsi->user_param + (msix_vect - 1);

                I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
                                        I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
                                (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
        }

        I40E_WRITE_FLUSH(hw);
}
649
/*
 * Unmask the VSI's queue MSIX vector: set INTENA, clear any pending
 * PBA bit, select ITR index 0 and program the default throttling
 * interval (hardware units of 2 us, see i40e_calc_itr_interval).
 */
static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t interval = i40e_calc_itr_interval(\
                        RTE_LIBRTE_I40E_ITR_INTERVAL);

        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
                                        I40E_PFINT_DYN_CTLN_INTENA_MASK |
                                        I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
                        (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
}
663
/* Mask the VSI's queue MSIX vector by zeroing its DYN_CTLN register. */
static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
}
671
672 static inline uint8_t
673 i40e_parse_link_speed(uint16_t eth_link_speed)
674 {
675         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
676
677         switch (eth_link_speed) {
678         case ETH_LINK_SPEED_40G:
679                 link_speed = I40E_LINK_SPEED_40GB;
680                 break;
681         case ETH_LINK_SPEED_20G:
682                 link_speed = I40E_LINK_SPEED_20GB;
683                 break;
684         case ETH_LINK_SPEED_10G:
685                 link_speed = I40E_LINK_SPEED_10GB;
686                 break;
687         case ETH_LINK_SPEED_1000:
688                 link_speed = I40E_LINK_SPEED_1GB;
689                 break;
690         case ETH_LINK_SPEED_100:
691                 link_speed = I40E_LINK_SPEED_100MB;
692                 break;
693         }
694
695         return link_speed;
696 }
697
/*
 * Program the PHY through the admin queue: read the current PHY
 * capabilities, merge the requested pause/low-power ability bits with
 * what the PHY reports, then set either the full advertisement list
 * (when autoneg is enabled in 'abilities') or the single forced speed.
 *
 * Returns I40E_SUCCESS, or -ENOTSUP if either admin-queue call fails.
 */
static int
i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
{
        enum i40e_status_code status;
        struct i40e_aq_get_phy_abilities_resp phy_ab;
        struct i40e_aq_set_phy_config phy_conf;
        const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
                        I40E_AQ_PHY_FLAG_PAUSE_RX |
                        I40E_AQ_PHY_FLAG_LOW_POWER;
        const uint8_t advt = I40E_LINK_SPEED_40GB |
                        I40E_LINK_SPEED_10GB |
                        I40E_LINK_SPEED_1GB |
                        I40E_LINK_SPEED_100MB;
        int ret = -ENOTSUP;

        status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
                                              NULL);
        if (status)
                return ret;

        memset(&phy_conf, 0, sizeof(phy_conf));

        /* bits 0-2 use the values from get_phy_abilities_resp */
        abilities &= ~mask;
        abilities |= phy_ab.abilities & mask;

        /* update abilities and speed */
        if (abilities & I40E_AQ_PHY_AN_ENABLED)
                phy_conf.link_speed = advt;
        else
                phy_conf.link_speed = force_speed;

        phy_conf.abilities = abilities;

        /* use get_phy_abilities_resp value for the rest */
        phy_conf.phy_type = phy_ab.phy_type;
        phy_conf.eee_capability = phy_ab.eee_capability;
        phy_conf.eeer = phy_ab.eeer_val;
        phy_conf.low_power_ctrl = phy_ab.d3_lpan;

        PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
                    phy_ab.abilities, phy_ab.link_speed);
        PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
                    phy_conf.abilities, phy_conf.link_speed);

        status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
        if (status)
                return ret;

        return I40E_SUCCESS;
}
749
750 static int
751 i40e_apply_link_speed(struct rte_eth_dev *dev)
752 {
753         uint8_t speed;
754         uint8_t abilities = 0;
755         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
756         struct rte_eth_conf *conf = &dev->data->dev_conf;
757
758         speed = i40e_parse_link_speed(conf->link_speed);
759         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
760         if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
761                 abilities |= I40E_AQ_PHY_AN_ENABLED;
762         else
763                 abilities |= I40E_AQ_PHY_LINK_ENABLED;
764
765         return i40e_phy_conf_link(hw, abilities, speed);
766 }
767
/*
 * Device start hook: validate the configured duplex, bring up the main
 * VSI, bind its queues to MSIX vectors, enable the queues, allow
 * broadcast reception and apply the configured link speed.
 *
 * Returns I40E_SUCCESS on success, -EINVAL on a bad duplex setting, or
 * the failing step's error code; on failure all queues are switched
 * back off before returning.
 */
static int
i40e_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	/* Only full duplex (or autoneg) is accepted */
	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
		(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
			     dev->data->dev_conf.link_duplex,
			     dev->data->port_id);
		return -EINVAL;
	}

	/* Initialize VSI */
	ret = i40e_vsi_init(vsi);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to init VSI");
		goto err_up;
	}

	/* Map queues with MSIX interrupt */
	i40e_vsi_queues_bind_intr(vsi);
	i40e_vsi_enable_queues_intr(vsi);

	/* Enable all queues which have been configured */
	ret = i40e_vsi_switch_queues(vsi, TRUE);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to enable VSI");
		goto err_up;
	}

	/* Enable receiving broadcast packets */
	if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
		/* Broadcast failure is logged but not treated as fatal */
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
	}

	/* Apply link configure */
	ret = i40e_apply_link_speed(dev);
	if (I40E_SUCCESS != ret) {
		PMD_DRV_LOG(ERR, "Fail to apply link setting");
		goto err_up;
	}

	return I40E_SUCCESS;

err_up:
	/* Best-effort rollback: make sure no queue is left enabled */
	i40e_vsi_switch_queues(vsi, FALSE);

	return ret;
}
823
/*
 * Device stop hook: quiesce the main VSI in the reverse order of
 * i40e_dev_start - disable queues, force the link down, then detach
 * the queues from their interrupt vectors.
 */
static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	/* Disable all queues */
	i40e_vsi_switch_queues(vsi, FALSE);

	/* Set link down */
	i40e_dev_set_link_down(dev);

	/* un-map queues with interrupt registers */
	i40e_vsi_disable_queues_intr(vsi);
	i40e_vsi_queues_unbind_intr(vsi);
}
840
/*
 * Device close hook: stop the port and release every HW resource -
 * interrupts, HMC, VSIs/VEBs, admin queue and the driver's resource
 * pools - then request a PF software reset so the function comes back
 * clean on the next probe.
 */
static void
i40e_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	PMD_INIT_FUNC_TRACE();

	i40e_dev_stop(dev);

	/* Disable interrupt */
	i40e_pf_disable_irq0(hw);
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* shutdown and destroy the HMC */
	i40e_shutdown_lan_hmc(hw);

	/* release all the existing VSIs and VEBs */
	i40e_vsi_release(pf->main_vsi);

	/* shutdown the adminq */
	i40e_aq_queue_shutdown(hw, true);
	i40e_shutdown_adminq(hw);

	i40e_res_pool_destroy(&pf->qp_pool);
	i40e_res_pool_destroy(&pf->msix_pool);

	/* force a PF reset to clean anything leftover */
	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	I40E_WRITE_FLUSH(hw);
}
875
876 static void
877 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
878 {
879         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
880         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
881         struct i40e_vsi *vsi = pf->main_vsi;
882         int status;
883
884         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
885                                                         true, NULL);
886         if (status != I40E_SUCCESS)
887                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
888
889         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
890                                                         TRUE, NULL);
891         if (status != I40E_SUCCESS)
892                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
893
894 }
895
896 static void
897 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
898 {
899         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
900         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
901         struct i40e_vsi *vsi = pf->main_vsi;
902         int status;
903
904         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
905                                                         false, NULL);
906         if (status != I40E_SUCCESS)
907                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
908
909         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
910                                                         false, NULL);
911         if (status != I40E_SUCCESS)
912                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
913 }
914
915 static void
916 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
917 {
918         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
919         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
920         struct i40e_vsi *vsi = pf->main_vsi;
921         int ret;
922
923         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
924         if (ret != I40E_SUCCESS)
925                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
926 }
927
928 static void
929 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
930 {
931         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
932         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
933         struct i40e_vsi *vsi = pf->main_vsi;
934         int ret;
935
936         if (dev->data->promiscuous == 1)
937                 return; /* must remain in all_multicast mode */
938
939         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
940                                 vsi->seid, FALSE, NULL);
941         if (ret != I40E_SUCCESS)
942                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
943 }
944
/*
 * Set device link up.
 */
static int
i40e_dev_set_link_up(struct rte_eth_dev *dev)
{
	/* Bringing the link up is simply re-programming the PHY with
	 * the currently configured speed/autoneg settings. */
	int ret = i40e_apply_link_speed(dev);

	return ret;
}
954
955 /*
956  * Set device link down.
957  */
958 static int
959 i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
960 {
961         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
962         uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
963         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
964
965         return i40e_phy_conf_link(hw, abilities, speed);
966 }
967
/*
 * Link-update hook: query the admin queue for the current link state
 * and publish it atomically into dev->data.
 *
 * Returns 0 when the link status changed since the last published
 * value, -1 when it is unchanged or the query failed with an unchanged
 * status (the up/unchanged convention used by this driver's callers).
 */
int
i40e_dev_link_update(struct rte_eth_dev *dev,
		     __rte_unused int wait_to_complete)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_link_status link_status;
	struct rte_eth_link link, old;
	int status;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	/* Snapshot the previously published state for change detection */
	rte_i40e_dev_atomic_read_link_status(dev, &old);

	/* Get link status information from hardware */
	status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
	if (status != I40E_SUCCESS) {
		/* AQ failed: publish link down with placeholder speed */
		link.link_speed = ETH_LINK_SPEED_100;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		PMD_DRV_LOG(ERR, "Failed to get link info");
		goto out;
	}

	link.link_status = link_status.link_info & I40E_AQ_LINK_UP;

	/* Link down: speed/duplex stay zeroed from the memset above */
	if (!link.link_status)
		goto out;

	/* i40e uses full duplex only */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case I40E_LINK_SPEED_100MB:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	case I40E_LINK_SPEED_1GB:
		link.link_speed = ETH_LINK_SPEED_1000;
		break;
	case I40E_LINK_SPEED_10GB:
		link.link_speed = ETH_LINK_SPEED_10G;
		break;
	case I40E_LINK_SPEED_20GB:
		link.link_speed = ETH_LINK_SPEED_20G;
		break;
	case I40E_LINK_SPEED_40GB:
		link.link_speed = ETH_LINK_SPEED_40G;
		break;
	default:
		/* Unknown speed reported: fall back to the lowest value */
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	}

out:
	rte_i40e_dev_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
1028
/* Get all the statistics of a VSI */
void
i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	/* oes holds the counter baselines, nes the accumulated totals;
	 * presumably the stat_update helpers diff the HW registers
	 * against oes when offset_loaded is set and latch fresh
	 * baselines otherwise - TODO confirm against the helpers. */
	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
	struct i40e_eth_stats *nes = &vsi->eth_stats;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	/* Per-VSI statistics block index assigned by firmware */
	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);

	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
			    vsi->offset_loaded, &oes->rx_bytes,
			    &nes->rx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
			    vsi->offset_loaded, &oes->rx_unicast,
			    &nes->rx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
			    vsi->offset_loaded, &oes->rx_multicast,
			    &nes->rx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
			    vsi->offset_loaded, &oes->rx_broadcast,
			    &nes->rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
			    &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
			    &oes->rx_unknown_protocol,
			    &nes->rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
			    vsi->offset_loaded, &oes->tx_bytes,
			    &nes->tx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
			    vsi->offset_loaded, &oes->tx_unicast,
			    &nes->tx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
			    vsi->offset_loaded, &oes->tx_multicast,
			    &nes->tx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
			    vsi->offset_loaded,  &oes->tx_broadcast,
			    &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
			    &oes->tx_errors, &nes->tx_errors);
	/* Baselines latched: subsequent calls report deltas */
	vsi->offset_loaded = true;

	/* NOTE(review): %lu assumes uint64_t == unsigned long; PRIu64
	 * would be the portable spelling. */
	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes:            %lu", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast:          %lu", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:        %lu", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %lu", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:         %lu", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes:            %lu", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast:          %lu", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:        %lu", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %lu", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:         %lu", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors:           %lu", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
		    vsi->vsi_id);
}
1092
/* Get all statistics of a port */
static void
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint32_t i;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct i40e_eth_stats */
	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
			    I40E_GLPRT_GORCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_bytes,
			    &ns->eth.rx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
			    I40E_GLPRT_UPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_unicast,
			    &ns->eth.rx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
			    I40E_GLPRT_MPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_multicast,
			    &ns->eth.rx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
			    I40E_GLPRT_BPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_broadcast,
			    &ns->eth.rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
			    pf->offset_loaded, &os->eth.rx_discards,
			    &ns->eth.rx_discards);
	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
			    pf->offset_loaded,
			    &os->eth.rx_unknown_protocol,
			    &ns->eth.rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
			    I40E_GLPRT_GOTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_bytes,
			    &ns->eth.tx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
			    I40E_GLPRT_UPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_unicast,
			    &ns->eth.tx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
			    I40E_GLPRT_MPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_multicast,
			    &ns->eth.tx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
			    I40E_GLPRT_BPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_broadcast,
			    &ns->eth.tx_broadcast);
	i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
			    pf->offset_loaded, &os->eth.tx_discards,
			    &ns->eth.tx_discards);
	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
			    pf->offset_loaded, &os->tx_dropped_link_down,
			    &ns->tx_dropped_link_down);
	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
			    pf->offset_loaded, &os->crc_errors,
			    &ns->crc_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
			    pf->offset_loaded, &os->illegal_bytes,
			    &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
			    pf->offset_loaded, &os->mac_local_faults,
			    &ns->mac_local_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
			    pf->offset_loaded, &os->mac_remote_faults,
			    &ns->mac_remote_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
			    pf->offset_loaded, &os->rx_length_errors,
			    &ns->rx_length_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
			    pf->offset_loaded, &os->link_xon_rx,
			    &ns->link_xon_rx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_rx,
			    &ns->link_xoff_rx);
	/* Per-priority (8 TCs) flow control counters */
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_rx[i],
				    &ns->priority_xon_rx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_rx[i],
				    &ns->priority_xoff_rx[i]);
	}
	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
			    pf->offset_loaded, &os->link_xon_tx,
			    &ns->link_xon_tx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_tx,
			    &ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_tx[i],
				    &ns->priority_xon_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_tx[i],
				    &ns->priority_xoff_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_2_xoff[i],
				    &ns->priority_xon_2_xoff[i]);
	}
	/* Rx/Tx size-bucket histograms */
	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
			    I40E_GLPRT_PRC64L(hw->port),
			    pf->offset_loaded, &os->rx_size_64,
			    &ns->rx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
			    I40E_GLPRT_PRC127L(hw->port),
			    pf->offset_loaded, &os->rx_size_127,
			    &ns->rx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
			    I40E_GLPRT_PRC255L(hw->port),
			    pf->offset_loaded, &os->rx_size_255,
			    &ns->rx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
			    I40E_GLPRT_PRC511L(hw->port),
			    pf->offset_loaded, &os->rx_size_511,
			    &ns->rx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
			    I40E_GLPRT_PRC1023L(hw->port),
			    pf->offset_loaded, &os->rx_size_1023,
			    &ns->rx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
			    I40E_GLPRT_PRC1522L(hw->port),
			    pf->offset_loaded, &os->rx_size_1522,
			    &ns->rx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
			    I40E_GLPRT_PRC9522L(hw->port),
			    pf->offset_loaded, &os->rx_size_big,
			    &ns->rx_size_big);
	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
			    pf->offset_loaded, &os->rx_undersize,
			    &ns->rx_undersize);
	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
			    pf->offset_loaded, &os->rx_fragments,
			    &ns->rx_fragments);
	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
			    pf->offset_loaded, &os->rx_oversize,
			    &ns->rx_oversize);
	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
			    pf->offset_loaded, &os->rx_jabber,
			    &ns->rx_jabber);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
			    I40E_GLPRT_PTC64L(hw->port),
			    pf->offset_loaded, &os->tx_size_64,
			    &ns->tx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
			    I40E_GLPRT_PTC127L(hw->port),
			    pf->offset_loaded, &os->tx_size_127,
			    &ns->tx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
			    I40E_GLPRT_PTC255L(hw->port),
			    pf->offset_loaded, &os->tx_size_255,
			    &ns->tx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
			    I40E_GLPRT_PTC511L(hw->port),
			    pf->offset_loaded, &os->tx_size_511,
			    &ns->tx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
			    I40E_GLPRT_PTC1023L(hw->port),
			    pf->offset_loaded, &os->tx_size_1023,
			    &ns->tx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
			    I40E_GLPRT_PTC1522L(hw->port),
			    pf->offset_loaded, &os->tx_size_1522,
			    &ns->tx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
			    I40E_GLPRT_PTC9522L(hw->port),
			    pf->offset_loaded, &os->tx_size_big,
			    &ns->tx_size_big);
	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	/* Baselines latched: subsequent calls report deltas */
	pf->offset_loaded = true;

	if (pf->main_vsi)
		i40e_update_vsi_stats(pf->main_vsi);

	/* Fold the HW counters into the generic ethdev stats */
	stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
						ns->eth.rx_broadcast;
	stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
						ns->eth.tx_broadcast;
	stats->ibytes   = ns->eth.rx_bytes;
	stats->obytes   = ns->eth.tx_bytes;
	stats->oerrors  = ns->eth.tx_errors;
	stats->imcasts  = ns->eth.rx_multicast;

	/* Rx Errors */
	stats->ibadcrc  = ns->crc_errors;
	stats->ibadlen  = ns->rx_length_errors + ns->rx_undersize +
			ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
	stats->imissed  = ns->eth.rx_discards;
	stats->ierrors  = stats->ibadcrc + stats->ibadlen + stats->imissed;

	/* NOTE(review): %lu assumes uint64_t == unsigned long; PRIu64
	 * would be the portable spelling. */
	PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
	PMD_DRV_LOG(DEBUG, "rx_bytes:            %lu", ns->eth.rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast:          %lu", ns->eth.rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:        %lu", ns->eth.rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %lu", ns->eth.rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:         %lu", ns->eth.rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
		    ns->eth.rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes:            %lu", ns->eth.tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast:          %lu", ns->eth.tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:        %lu", ns->eth.tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %lu", ns->eth.tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:         %lu", ns->eth.tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors:           %lu", ns->eth.tx_errors);

	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %lu",
		    ns->tx_dropped_link_down);
	PMD_DRV_LOG(DEBUG, "crc_errors:               %lu", ns->crc_errors);
	PMD_DRV_LOG(DEBUG, "illegal_bytes:            %lu",
		    ns->illegal_bytes);
	PMD_DRV_LOG(DEBUG, "error_bytes:              %lu", ns->error_bytes);
	PMD_DRV_LOG(DEBUG, "mac_local_faults:         %lu",
		    ns->mac_local_faults);
	PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %lu",
		    ns->mac_remote_faults);
	PMD_DRV_LOG(DEBUG, "rx_length_errors:         %lu",
		    ns->rx_length_errors);
	PMD_DRV_LOG(DEBUG, "link_xon_rx:              %lu", ns->link_xon_rx);
	PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %lu", ns->link_xoff_rx);
	for (i = 0; i < 8; i++) {
		PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %lu",
				i, ns->priority_xon_rx[i]);
		PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %lu",
				i, ns->priority_xoff_rx[i]);
	}
	PMD_DRV_LOG(DEBUG, "link_xon_tx:              %lu", ns->link_xon_tx);
	PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %lu", ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %lu",
				i, ns->priority_xon_tx[i]);
		PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %lu",
				i, ns->priority_xoff_tx[i]);
		PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %lu",
				i, ns->priority_xon_2_xoff[i]);
	}
	PMD_DRV_LOG(DEBUG, "rx_size_64:               %lu", ns->rx_size_64);
	PMD_DRV_LOG(DEBUG, "rx_size_127:              %lu", ns->rx_size_127);
	PMD_DRV_LOG(DEBUG, "rx_size_255:              %lu", ns->rx_size_255);
	PMD_DRV_LOG(DEBUG, "rx_size_511:              %lu", ns->rx_size_511);
	PMD_DRV_LOG(DEBUG, "rx_size_1023:             %lu", ns->rx_size_1023);
	PMD_DRV_LOG(DEBUG, "rx_size_1522:             %lu", ns->rx_size_1522);
	PMD_DRV_LOG(DEBUG, "rx_size_big:              %lu", ns->rx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_undersize:             %lu", ns->rx_undersize);
	PMD_DRV_LOG(DEBUG, "rx_fragments:             %lu", ns->rx_fragments);
	PMD_DRV_LOG(DEBUG, "rx_oversize:              %lu", ns->rx_oversize);
	PMD_DRV_LOG(DEBUG, "rx_jabber:                %lu", ns->rx_jabber);
	PMD_DRV_LOG(DEBUG, "tx_size_64:               %lu", ns->tx_size_64);
	PMD_DRV_LOG(DEBUG, "tx_size_127:              %lu", ns->tx_size_127);
	PMD_DRV_LOG(DEBUG, "tx_size_255:              %lu", ns->tx_size_255);
	PMD_DRV_LOG(DEBUG, "tx_size_511:              %lu", ns->tx_size_511);
	PMD_DRV_LOG(DEBUG, "tx_size_1023:             %lu", ns->tx_size_1023);
	PMD_DRV_LOG(DEBUG, "tx_size_1522:             %lu", ns->tx_size_1522);
	PMD_DRV_LOG(DEBUG, "tx_size_big:              %lu", ns->tx_size_big);
	PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %lu",
			ns->mac_short_packet_dropped);
	PMD_DRV_LOG(DEBUG, "checksum_error:           %lu",
		    ns->checksum_error);
	PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
}
1367
/* Reset the statistics */
static void
i40e_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* It results in reloading the start point of each counter */
	/* (the next i40e_dev_stats_get() re-latches the HW counters as
	 * fresh baselines, so all reported values restart from zero) */
	pf->offset_loaded = false;
}
1377
/*
 * Per-queue statistics mapping is not implemented for i40e; this stub
 * only satisfies the ethdev ops table and reports -ENOSYS.
 */
static int
i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
				 __rte_unused uint16_t queue_id,
				 __rte_unused uint8_t stat_idx,
				 __rte_unused uint8_t is_rx)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}
1388
/* Report device limits, offload capabilities and default queue
 * configuration to the ethdev layer. */
static void
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;

        /* Queue and MAC-address limits come from the PF's main VSI */
        dev_info->max_rx_queues = vsi->nb_qps;
        dev_info->max_tx_queues = vsi->nb_qps;
        dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
        dev_info->max_mac_addrs = vsi->max_macaddrs;
        dev_info->max_vfs = dev->pci_dev->max_vfs;
        /* Advertised RX/TX offloads supported by this PMD */
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM;

        /* Defaults applied when the application passes a NULL rxconf */
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = I40E_DEFAULT_RX_PTHRESH,
                        .hthresh = I40E_DEFAULT_RX_HTHRESH,
                        .wthresh = I40E_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
        };

        /* Defaults applied when the application passes a NULL txconf */
        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = I40E_DEFAULT_TX_PTHRESH,
                        .hthresh = I40E_DEFAULT_TX_HTHRESH,
                        .wthresh = I40E_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
        };

}
1435
1436 static int
1437 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1438 {
1439         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1440         struct i40e_vsi *vsi = pf->main_vsi;
1441         PMD_INIT_FUNC_TRACE();
1442
1443         if (on)
1444                 return i40e_vsi_add_vlan(vsi, vlan_id);
1445         else
1446                 return i40e_vsi_delete_vlan(vsi, vlan_id);
1447 }
1448
/* Changing the VLAN TPID is not supported; the hook is a trace-only stub. */
static void
i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
                   __rte_unused uint16_t tpid)
{
        PMD_INIT_FUNC_TRACE();
}
1455
1456 static void
1457 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1458 {
1459         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1460         struct i40e_vsi *vsi = pf->main_vsi;
1461
1462         if (mask & ETH_VLAN_STRIP_MASK) {
1463                 /* Enable or disable VLAN stripping */
1464                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1465                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
1466                 else
1467                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
1468         }
1469
1470         if (mask & ETH_VLAN_EXTEND_MASK) {
1471                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1472                         i40e_vsi_config_double_vlan(vsi, TRUE);
1473                 else
1474                         i40e_vsi_config_double_vlan(vsi, FALSE);
1475         }
1476 }
1477
/* Per-queue VLAN stripping is not supported; the hook is a trace-only stub. */
static void
i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
                          __rte_unused uint16_t queue,
                          __rte_unused int on)
{
        PMD_INIT_FUNC_TRACE();
}
1485
1486 static int
1487 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1488 {
1489         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1490         struct i40e_vsi *vsi = pf->main_vsi;
1491         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1492         struct i40e_vsi_vlan_pvid_info info;
1493
1494         memset(&info, 0, sizeof(info));
1495         info.on = on;
1496         if (info.on)
1497                 info.config.pvid = pvid;
1498         else {
1499                 info.config.reject.tagged =
1500                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
1501                 info.config.reject.untagged =
1502                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
1503         }
1504
1505         return i40e_vsi_vlan_pvid_set(vsi, &info);
1506 }
1507
1508 static int
1509 i40e_dev_led_on(struct rte_eth_dev *dev)
1510 {
1511         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1512         uint32_t mode = i40e_led_get(hw);
1513
1514         if (mode == 0)
1515                 i40e_led_set(hw, 0xf, true); /* 0xf means led always true */
1516
1517         return 0;
1518 }
1519
1520 static int
1521 i40e_dev_led_off(struct rte_eth_dev *dev)
1522 {
1523         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1524         uint32_t mode = i40e_led_get(hw);
1525
1526         if (mode != 0)
1527                 i40e_led_set(hw, 0, false);
1528
1529         return 0;
1530 }
1531
/* Flow control configuration is not implemented; always returns -ENOSYS. */
static int
i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
                   __rte_unused struct rte_eth_fc_conf *fc_conf)
{
        PMD_INIT_FUNC_TRACE();

        return -ENOSYS;
}
1540
/* Priority flow control is not implemented; always returns -ENOSYS. */
static int
i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
                            __rte_unused struct rte_eth_pfc_conf *pfc_conf)
{
        PMD_INIT_FUNC_TRACE();

        return -ENOSYS;
}
1549
/* Add a MAC address, and update filters */
static void
i40e_macaddr_add(struct rte_eth_dev *dev,
                 struct ether_addr *mac_addr,
                 __attribute__((unused)) uint32_t index,
                 __attribute__((unused)) uint32_t pool)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_mac_filter_info mac_filter;
        struct i40e_vsi *vsi = pf->main_vsi;
        struct ether_addr old_mac;
        int ret;

        /* Silently reject addresses the stack considers invalid */
        if (!is_valid_assigned_ether_addr(mac_addr)) {
                PMD_DRV_LOG(ERR, "Invalid ethernet address");
                return;
        }

        /* Re-adding the currently active device address is a no-op */
        if (is_same_ether_addr(mac_addr, &(pf->dev_addr))) {
                PMD_DRV_LOG(INFO, "Ignore adding permanent mac address");
                return;
        }

        /* Write mac address */
        /* Program the new address into hardware via the admin queue first */
        ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
                                        mac_addr->addr_bytes, NULL);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to write mac address");
                return;
        }

        /* Remember the previous address so its filter can be dropped last */
        (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
        (void)rte_memcpy(hw->mac.addr, mac_addr->addr_bytes,
                        ETHER_ADDR_LEN);
        (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
        mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;

        ret = i40e_vsi_add_mac(vsi, &mac_filter);
        if (ret != I40E_SUCCESS) {
                /* NOTE(review): hw->mac.addr already holds the new address
                 * at this point even though the filter add failed — confirm
                 * whether it should be rolled back to old_mac. */
                PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
                return;
        }

        /* New address is active: record it, then retire the old filter */
        ether_addr_copy(mac_addr, &pf->dev_addr);
        i40e_vsi_delete_mac(vsi, &old_mac);
}
1597
/* Remove a MAC address, and update filters */
static void
i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
        struct ether_addr *macaddr;
        int ret;
        struct i40e_hw *hw =
                I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* 'index' must refer to a slot within this VSI's MAC table */
        if (index >= vsi->max_macaddrs)
                return;

        macaddr = &(data->mac_addrs[index]);
        if (!is_valid_assigned_ether_addr(macaddr))
                return;

        /* Restore the permanent (factory) MAC in hardware.
         * NOTE(review): this happens regardless of which index is removed,
         * even if the removed address was not the active one — confirm
         * this is the intended behavior. */
        ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
                                        hw->mac.perm_addr, NULL);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to write mac address");
                return;
        }

        (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);

        /* Drop the MACVLAN filter for the removed address */
        ret = i40e_vsi_delete_mac(vsi, macaddr);
        if (ret != I40E_SUCCESS)
                return;

        /* Clear device address as it has been removed */
        if (is_same_ether_addr(&(pf->dev_addr), macaddr))
                memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
}
1634
/* Set perfect match or hash match of MAC and VLAN for a VF */
/*
 * @pf:     PF instance owning the VFs (must be non-NULL)
 * @filter: MAC filter description; dst_id selects the target VF
 * @add:    true to install the filter, false to remove it
 *
 * Returns 0 on success, -EINVAL on bad arguments, -1 on filter
 * add/delete failure.
 */
static int
i40e_vf_mac_filter_set(struct i40e_pf *pf,
                 struct rte_eth_mac_filter *filter,
                 bool add)
{
        struct i40e_hw *hw;
        struct i40e_mac_filter_info mac_filter;
        struct ether_addr old_mac;
        struct ether_addr *new_mac;
        struct i40e_pf_vf *vf = NULL;
        uint16_t vf_id;
        int ret;

        if (pf == NULL) {
                PMD_DRV_LOG(ERR, "Invalid PF argument.");
                return -EINVAL;
        }
        hw = I40E_PF_TO_HW(pf);

        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
                return -EINVAL;
        }

        new_mac = &filter->mac_addr;

        if (is_zero_ether_addr(new_mac)) {
                PMD_DRV_LOG(ERR, "Invalid ethernet address.");
                return -EINVAL;
        }

        /* The filter's destination id selects the target VF */
        vf_id = filter->dst_id;

        if (vf_id > pf->vf_num - 1 || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid argument.");
                return -EINVAL;
        }
        vf = &pf->vfs[vf_id];

        if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
                PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
                return -EINVAL;
        }

        if (add) {
                /* NOTE(review): hw->mac.addr is overwritten with the new
                 * address here; old_mac is saved but never used afterwards
                 * — confirm this is intended. */
                (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
                (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
                                ETHER_ADDR_LEN);
                (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
                                 ETHER_ADDR_LEN);

                /* Install the MACVLAN filter on the VF's VSI */
                mac_filter.filter_type = filter->filter_type;
                ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
                        return -1;
                }
                ether_addr_copy(new_mac, &pf->dev_addr);
        } else {
                /* Removal: fall back to the permanent hardware address */
                (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
                                ETHER_ADDR_LEN);
                ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
                        return -1;
                }

                /* Clear device address as it has been removed */
                if (is_same_ether_addr(&(pf->dev_addr), new_mac))
                        memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
        }

        return 0;
}
1710
1711 /* MAC filter handle */
1712 static int
1713 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
1714                 void *arg)
1715 {
1716         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1717         struct rte_eth_mac_filter *filter;
1718         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1719         int ret = I40E_NOT_SUPPORTED;
1720
1721         filter = (struct rte_eth_mac_filter *)(arg);
1722
1723         switch (filter_op) {
1724         case RTE_ETH_FILTER_NONE:
1725                 ret = I40E_SUCCESS;
1726                 break;
1727         case RTE_ETH_FILTER_ADD:
1728                 i40e_pf_disable_irq0(hw);
1729                 if (filter->is_vf)
1730                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
1731                 i40e_pf_enable_irq0(hw);
1732                 break;
1733         case RTE_ETH_FILTER_DELETE:
1734                 i40e_pf_disable_irq0(hw);
1735                 if (filter->is_vf)
1736                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
1737                 i40e_pf_enable_irq0(hw);
1738                 break;
1739         default:
1740                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
1741                 ret = I40E_ERR_PARAM;
1742                 break;
1743         }
1744
1745         return ret;
1746 }
1747
/* Program the RSS redirection table from the entries selected by the
 * caller's masks. Always returns 0. */
static int
i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                         struct rte_eth_rss_reta *reta_conf)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t lut, l;
        uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;

        /* Each PFQF_HLUT register packs four 8-bit LUT entries */
        for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
                /* mask_lo selects entries [0, max); mask_hi the upper half */
                if (i < max)
                        mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
                else
                        mask = (uint8_t)((reta_conf->mask_hi >>
                                                (i - max)) & 0xF);

                /* No entry of this register selected: leave it untouched */
                if (!mask)
                        continue;

                /* Read-modify-write unless all four lanes are replaced */
                if (mask == 0xF)
                        l = 0;
                else
                        l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));

                for (j = 0, lut = 0; j < 4; j++) {
                        /* Take the new value for selected lanes, keep the
                         * current register contents for the rest */
                        if (mask & (0x1 << j))
                                lut |= reta_conf->reta[i + j] << (8 * j);
                        else
                                lut |= l & (0xFF << (8 * j));
                }
                I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
        }

        return 0;
}
1782
/* Read back the RSS redirection table entries selected by the caller's
 * masks. Always returns 0. */
static int
i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta *reta_conf)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t lut;
        uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;

        /* Each PFQF_HLUT register packs four 8-bit LUT entries */
        for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
                /* mask_lo selects entries [0, max); mask_hi the upper half */
                if (i < max)
                        mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
                else
                        mask = (uint8_t)((reta_conf->mask_hi >>
                                                (i - max)) & 0xF);

                if (!mask)
                        continue;

                lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
                for (j = 0; j < 4; j++) {
                        /* Only fill in the entries the caller asked for */
                        if (mask & (0x1 << j))
                                reta_conf->reta[i + j] =
                                        (uint8_t)((lut >> (8 * j)) & 0xFF);
                }
        }

        return 0;
}
1811
1812 /**
1813  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
1814  * @hw:   pointer to the HW structure
1815  * @mem:  pointer to mem struct to fill out
1816  * @size: size of memory requested
1817  * @alignment: what to align the allocation to
1818  **/
1819 enum i40e_status_code
1820 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1821                         struct i40e_dma_mem *mem,
1822                         u64 size,
1823                         u32 alignment)
1824 {
1825         static uint64_t id = 0;
1826         const struct rte_memzone *mz = NULL;
1827         char z_name[RTE_MEMZONE_NAMESIZE];
1828
1829         if (!mem)
1830                 return I40E_ERR_PARAM;
1831
1832         id++;
1833         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
1834 #ifdef RTE_LIBRTE_XEN_DOM0
1835         mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
1836                                                         RTE_PGSIZE_2M);
1837 #else
1838         mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
1839 #endif
1840         if (!mz)
1841                 return I40E_ERR_NO_MEMORY;
1842
1843         mem->id = id;
1844         mem->size = size;
1845         mem->va = mz->addr;
1846 #ifdef RTE_LIBRTE_XEN_DOM0
1847         mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1848 #else
1849         mem->pa = mz->phys_addr;
1850 #endif
1851
1852         return I40E_SUCCESS;
1853 }
1854
1855 /**
1856  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
1857  * @hw:   pointer to the HW structure
1858  * @mem:  ptr to mem struct to free
1859  **/
1860 enum i40e_status_code
1861 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1862                     struct i40e_dma_mem *mem)
1863 {
1864         if (!mem || !mem->va)
1865                 return I40E_ERR_PARAM;
1866
1867         mem->va = NULL;
1868         mem->pa = (u64)0;
1869
1870         return I40E_SUCCESS;
1871 }
1872
1873 /**
1874  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
1875  * @hw:   pointer to the HW structure
1876  * @mem:  pointer to mem struct to fill out
1877  * @size: size of memory requested
1878  **/
1879 enum i40e_status_code
1880 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1881                          struct i40e_virt_mem *mem,
1882                          u32 size)
1883 {
1884         if (!mem)
1885                 return I40E_ERR_PARAM;
1886
1887         mem->size = size;
1888         mem->va = rte_zmalloc("i40e", size, 0);
1889
1890         if (mem->va)
1891                 return I40E_SUCCESS;
1892         else
1893                 return I40E_ERR_NO_MEMORY;
1894 }
1895
1896 /**
1897  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
1898  * @hw:   pointer to the HW structure
1899  * @mem:  pointer to mem struct to free
1900  **/
1901 enum i40e_status_code
1902 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1903                      struct i40e_virt_mem *mem)
1904 {
1905         if (!mem)
1906                 return I40E_ERR_PARAM;
1907
1908         rte_free(mem->va);
1909         mem->va = NULL;
1910
1911         return I40E_SUCCESS;
1912 }
1913
/* Shared-code OS-abstraction callback: initialize a spinlock. */
void
i40e_init_spinlock_d(struct i40e_spinlock *sp)
{
        rte_spinlock_init(&sp->spinlock);
}
1919
/* Shared-code OS-abstraction callback: acquire (busy-wait on) a spinlock. */
void
i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
{
        rte_spinlock_lock(&sp->spinlock);
}
1925
/* Shared-code OS-abstraction callback: release a spinlock. */
void
i40e_release_spinlock_d(struct i40e_spinlock *sp)
{
        rte_spinlock_unlock(&sp->spinlock);
}
1931
/* Shared-code OS-abstraction callback: an rte_spinlock needs no teardown. */
void
i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
{
        return;
}
1937
1938 /**
1939  * Get the hardware capabilities, which will be parsed
1940  * and saved into struct i40e_hw.
1941  */
1942 static int
1943 i40e_get_cap(struct i40e_hw *hw)
1944 {
1945         struct i40e_aqc_list_capabilities_element_resp *buf;
1946         uint16_t len, size = 0;
1947         int ret;
1948
1949         /* Calculate a huge enough buff for saving response data temporarily */
1950         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
1951                                                 I40E_MAX_CAP_ELE_NUM;
1952         buf = rte_zmalloc("i40e", len, 0);
1953         if (!buf) {
1954                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1955                 return I40E_ERR_NO_MEMORY;
1956         }
1957
1958         /* Get, parse the capabilities and save it to hw */
1959         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
1960                         i40e_aqc_opc_list_func_capabilities, NULL);
1961         if (ret != I40E_SUCCESS)
1962                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
1963
1964         /* Free the temporary buffer after being used */
1965         rte_free(buf);
1966
1967         return ret;
1968 }
1969
/*
 * Derive the PF's resource layout (queue pairs and VSIs for LAN, SRIOV
 * VFs, VMDq and flow director) from the discovered HW capabilities and
 * verify the totals fit what the function owns.
 *
 * Returns I40E_SUCCESS, or -EINVAL when the requested configuration
 * exceeds the reported capabilities.
 */
static int
i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint16_t sum_queues = 0, sum_vsis;

        /* First check if FW support SRIOV */
        if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
                PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
                return -EINVAL;
        }

        pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
        pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
        PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
        /* Allocate queues for pf */
        if (hw->func_caps.rss) {
                /* LAN queue count is capped by the RSS table width and
                 * rounded down to a power of two */
                pf->flags |= I40E_FLAG_RSS;
                pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
                        (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
                pf->lan_nb_qps = i40e_prev_power_of_2(pf->lan_nb_qps);
        } else
                pf->lan_nb_qps = 1;
        sum_queues = pf->lan_nb_qps;
        /* Default VSI is not counted in */
        sum_vsis = 0;
        PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);

        if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
                pf->flags |= I40E_FLAG_SRIOV;
                pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
                if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
                        PMD_INIT_LOG(ERR, "Config VF number %u, "
                                     "max supported %u.",
                                     dev->pci_dev->max_vfs,
                                     hw->func_caps.num_vfs);
                        return -EINVAL;
                }
                if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
                        PMD_INIT_LOG(ERR, "FVL VF queue %u, "
                                     "max support %u queues.",
                                     pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
                        return -EINVAL;
                }
                /* Each VF consumes one VSI and vf_nb_qps queue pairs */
                pf->vf_num = dev->pci_dev->max_vfs;
                sum_queues += pf->vf_nb_qps * pf->vf_num;
                sum_vsis   += pf->vf_num;
                PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
                             pf->vf_num, pf->vf_nb_qps);
        } else
                pf->vf_num = 0;

        if (hw->func_caps.vmdq) {
                /* VMDq adds one more VSI with its default queue count */
                pf->flags |= I40E_FLAG_VMDQ;
                pf->vmdq_nb_qps = I40E_DEFAULT_QP_NUM_VMDQ;
                sum_queues += pf->vmdq_nb_qps;
                sum_vsis += 1;
                PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
        }

        if (hw->func_caps.fd) {
                pf->flags |= I40E_FLAG_FDIR;
                pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
                /**
                 * Each flow director consumes one VSI and one queue,
                 * but can't calculate out predictably here.
                 */
        }

        if (sum_vsis > pf->max_num_vsi ||
                sum_queues > hw->func_caps.num_rx_qp) {
                PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
                PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
                             pf->max_num_vsi, sum_vsis);
                PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
                             hw->func_caps.num_rx_qp, sum_queues);
                return -EINVAL;
        }

        /* Each VSI occupy 1 MSIX interrupt at least, plus IRQ0 for misc intr
         * cause */
        /* NOTE(review): if num_msix_vectors is an unsigned 0 the
         * subtraction below wraps — confirm the capability is never 0. */
        if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
                PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
                             sum_vsis, hw->func_caps.num_msix_vectors);
                return -EINVAL;
        }
        return I40E_SUCCESS;
}
2059
2060 static int
2061 i40e_pf_get_switch_config(struct i40e_pf *pf)
2062 {
2063         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2064         struct i40e_aqc_get_switch_config_resp *switch_config;
2065         struct i40e_aqc_switch_config_element_resp *element;
2066         uint16_t start_seid = 0, num_reported;
2067         int ret;
2068
2069         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
2070                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
2071         if (!switch_config) {
2072                 PMD_DRV_LOG(ERR, "Failed to allocated memory");
2073                 return -ENOMEM;
2074         }
2075
2076         /* Get the switch configurations */
2077         ret = i40e_aq_get_switch_config(hw, switch_config,
2078                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
2079         if (ret != I40E_SUCCESS) {
2080                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
2081                 goto fail;
2082         }
2083         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
2084         if (num_reported != 1) { /* The number should be 1 */
2085                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
2086                 goto fail;
2087         }
2088
2089         /* Parse the switch configuration elements */
2090         element = &(switch_config->element[0]);
2091         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
2092                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
2093                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
2094         } else
2095                 PMD_DRV_LOG(INFO, "Unknown element type");
2096
2097 fail:
2098         rte_free(switch_config);
2099
2100         return ret;
2101 }
2102
2103 static int
2104 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
2105                         uint32_t num)
2106 {
2107         struct pool_entry *entry;
2108
2109         if (pool == NULL || num == 0)
2110                 return -EINVAL;
2111
2112         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
2113         if (entry == NULL) {
2114                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
2115                 return -ENOMEM;
2116         }
2117
2118         /* queue heap initialize */
2119         pool->num_free = num;
2120         pool->num_alloc = 0;
2121         pool->base = base;
2122         LIST_INIT(&pool->alloc_list);
2123         LIST_INIT(&pool->free_list);
2124
2125         /* Initialize element  */
2126         entry->base = 0;
2127         entry->len = num;
2128
2129         LIST_INSERT_HEAD(&pool->free_list, entry, next);
2130         return 0;
2131 }
2132
2133 static void
2134 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
2135 {
2136         struct pool_entry *entry;
2137
2138         if (pool == NULL)
2139                 return;
2140
2141         LIST_FOREACH(entry, &pool->alloc_list, next) {
2142                 LIST_REMOVE(entry, next);
2143                 rte_free(entry);
2144         }
2145
2146         LIST_FOREACH(entry, &pool->free_list, next) {
2147                 LIST_REMOVE(entry, next);
2148                 rte_free(entry);
2149         }
2150
2151         pool->num_free = 0;
2152         pool->num_alloc = 0;
2153         pool->base = 0;
2154         LIST_INIT(&pool->alloc_list);
2155         LIST_INIT(&pool->free_list);
2156 }
2157
2158 static int
2159 i40e_res_pool_free(struct i40e_res_pool_info *pool,
2160                        uint32_t base)
2161 {
2162         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
2163         uint32_t pool_offset;
2164         int insert;
2165
2166         if (pool == NULL) {
2167                 PMD_DRV_LOG(ERR, "Invalid parameter");
2168                 return -EINVAL;
2169         }
2170
2171         pool_offset = base - pool->base;
2172         /* Lookup in alloc list */
2173         LIST_FOREACH(entry, &pool->alloc_list, next) {
2174                 if (entry->base == pool_offset) {
2175                         valid_entry = entry;
2176                         LIST_REMOVE(entry, next);
2177                         break;
2178                 }
2179         }
2180
2181         /* Not find, return */
2182         if (valid_entry == NULL) {
2183                 PMD_DRV_LOG(ERR, "Failed to find entry");
2184                 return -EINVAL;
2185         }
2186
2187         /**
2188          * Found it, move it to free list  and try to merge.
2189          * In order to make merge easier, always sort it by qbase.
2190          * Find adjacent prev and last entries.
2191          */
2192         prev = next = NULL;
2193         LIST_FOREACH(entry, &pool->free_list, next) {
2194                 if (entry->base > valid_entry->base) {
2195                         next = entry;
2196                         break;
2197                 }
2198                 prev = entry;
2199         }
2200
2201         insert = 0;
2202         /* Try to merge with next one*/
2203         if (next != NULL) {
2204                 /* Merge with next one */
2205                 if (valid_entry->base + valid_entry->len == next->base) {
2206                         next->base = valid_entry->base;
2207                         next->len += valid_entry->len;
2208                         rte_free(valid_entry);
2209                         valid_entry = next;
2210                         insert = 1;
2211                 }
2212         }
2213
2214         if (prev != NULL) {
2215                 /* Merge with previous one */
2216                 if (prev->base + prev->len == valid_entry->base) {
2217                         prev->len += valid_entry->len;
2218                         /* If it merge with next one, remove next node */
2219                         if (insert == 1) {
2220                                 LIST_REMOVE(valid_entry, next);
2221                                 rte_free(valid_entry);
2222                         } else {
2223                                 rte_free(valid_entry);
2224                                 insert = 1;
2225                         }
2226                 }
2227         }
2228
2229         /* Not find any entry to merge, insert */
2230         if (insert == 0) {
2231                 if (prev != NULL)
2232                         LIST_INSERT_AFTER(prev, valid_entry, next);
2233                 else if (next != NULL)
2234                         LIST_INSERT_BEFORE(next, valid_entry, next);
2235                 else /* It's empty list, insert to head */
2236                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
2237         }
2238
2239         pool->num_free += valid_entry->len;
2240         pool->num_alloc -= valid_entry->len;
2241
2242         return 0;
2243 }
2244
/**
 * Allocate 'num' contiguous entries from a resource pool using a
 * best-fit search of the free list.
 *
 * On success the chosen (or split-off) entry is moved to the alloc list
 * and the pool's free/alloc counters are updated.
 *
 * @param pool  Resource pool to allocate from; must not be NULL.
 * @param num   Number of entries requested; must be non-zero.
 * @return The absolute base index of the allocated range
 *         (entry base + pool->base) on success, or a negative errno
 *         (-EINVAL bad args, -ENOMEM no fitting range) on failure.
 */
static int
i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
                       uint16_t num)
{
        struct pool_entry *entry, *valid_entry;

        if (pool == NULL || num == 0) {
                PMD_DRV_LOG(ERR, "Invalid parameter");
                return -EINVAL;
        }

        /* Quick reject: not enough total free entries for the request */
        if (pool->num_free < num) {
                PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
                            num, pool->num_free);
                return -ENOMEM;
        }

        valid_entry = NULL;
        /* Lookup  in free list and find most fit one */
        LIST_FOREACH(entry, &pool->free_list, next) {
                if (entry->len >= num) {
                        /* Find best one */
                        if (entry->len == num) {
                                valid_entry = entry;
                                break;
                        }
                        /* Best fit: keep the smallest entry that still fits */
                        if (valid_entry == NULL || valid_entry->len > entry->len)
                                valid_entry = entry;
                }
        }

        /* Not find one to satisfy the request, return */
        if (valid_entry == NULL) {
                PMD_DRV_LOG(ERR, "No valid entry found");
                return -ENOMEM;
        }
        /**
         * The entry have equal queue number as requested,
         * remove it from the free list.
         */
        if (valid_entry->len == num) {
                LIST_REMOVE(valid_entry, next);
        } else {
                /**
                 * The entry have more numbers than requested,
                 * create a new entry for alloc_list and minus its
                 * queue base and number in free_list.
                 */
                entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
                if (entry == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to allocate memory for "
                                    "resource pool");
                        return -ENOMEM;
                }
                /* New node takes the front of the free range; the free
                 * node keeps the remainder. */
                entry->base = valid_entry->base;
                entry->len = num;
                valid_entry->base += num;
                valid_entry->len -= num;
                valid_entry = entry;
        }

        /* Insert it into alloc list, not sorted */
        LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

        pool->num_free -= valid_entry->len;
        pool->num_alloc += valid_entry->len;

        return (valid_entry->base + pool->base);
}
2314
2315 /**
2316  * bitmap_is_subset - Check whether src2 is subset of src1
2317  **/
2318 static inline int
2319 bitmap_is_subset(uint8_t src1, uint8_t src2)
2320 {
2321         return !((src1 ^ src2) & src2);
2322 }
2323
2324 static int
2325 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2326 {
2327         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2328
2329         /* If DCB is not supported, only default TC is supported */
2330         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2331                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
2332                 return -EINVAL;
2333         }
2334
2335         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2336                 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
2337                             "HW support 0x%x", hw->func_caps.enabled_tcmap,
2338                             enabled_tcmap);
2339                 return -EINVAL;
2340         }
2341         return I40E_SUCCESS;
2342 }
2343
2344 int
2345 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2346                                 struct i40e_vsi_vlan_pvid_info *info)
2347 {
2348         struct i40e_hw *hw;
2349         struct i40e_vsi_context ctxt;
2350         uint8_t vlan_flags = 0;
2351         int ret;
2352
2353         if (vsi == NULL || info == NULL) {
2354                 PMD_DRV_LOG(ERR, "invalid parameters");
2355                 return I40E_ERR_PARAM;
2356         }
2357
2358         if (info->on) {
2359                 vsi->info.pvid = info->config.pvid;
2360                 /**
2361                  * If insert pvid is enabled, only tagged pkts are
2362                  * allowed to be sent out.
2363                  */
2364                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2365                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2366         } else {
2367                 vsi->info.pvid = 0;
2368                 if (info->config.reject.tagged == 0)
2369                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2370
2371                 if (info->config.reject.untagged == 0)
2372                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2373         }
2374         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2375                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
2376         vsi->info.port_vlan_flags |= vlan_flags;
2377         vsi->info.valid_sections =
2378                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2379         memset(&ctxt, 0, sizeof(ctxt));
2380         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2381         ctxt.seid = vsi->seid;
2382
2383         hw = I40E_VSI_TO_HW(vsi);
2384         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2385         if (ret != I40E_SUCCESS)
2386                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2387
2388         return ret;
2389 }
2390
2391 static int
2392 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2393 {
2394         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2395         int i, ret;
2396         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2397
2398         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2399         if (ret != I40E_SUCCESS)
2400                 return ret;
2401
2402         if (!vsi->seid) {
2403                 PMD_DRV_LOG(ERR, "seid not valid");
2404                 return -EINVAL;
2405         }
2406
2407         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2408         tc_bw_data.tc_valid_bits = enabled_tcmap;
2409         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2410                 tc_bw_data.tc_bw_credits[i] =
2411                         (enabled_tcmap & (1 << i)) ? 1 : 0;
2412
2413         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2414         if (ret != I40E_SUCCESS) {
2415                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
2416                 return ret;
2417         }
2418
2419         (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2420                                         sizeof(vsi->info.qs_handle));
2421         return I40E_SUCCESS;
2422 }
2423
2424 static int
2425 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2426                                  struct i40e_aqc_vsi_properties_data *info,
2427                                  uint8_t enabled_tcmap)
2428 {
2429         int ret, total_tc = 0, i;
2430         uint16_t qpnum_per_tc, bsf, qp_idx;
2431
2432         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2433         if (ret != I40E_SUCCESS)
2434                 return ret;
2435
2436         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2437                 if (enabled_tcmap & (1 << i))
2438                         total_tc++;
2439         vsi->enabled_tc = enabled_tcmap;
2440
2441         /* Number of queues per enabled TC */
2442         qpnum_per_tc = i40e_prev_power_of_2(vsi->nb_qps / total_tc);
2443         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2444         bsf = rte_bsf32(qpnum_per_tc);
2445
2446         /* Adjust the queue number to actual queues that can be applied */
2447         vsi->nb_qps = qpnum_per_tc * total_tc;
2448
2449         /**
2450          * Configure TC and queue mapping parameters, for enabled TC,
2451          * allocate qpnum_per_tc queues to this traffic. For disabled TC,
2452          * default queue will serve it.
2453          */
2454         qp_idx = 0;
2455         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2456                 if (vsi->enabled_tc & (1 << i)) {
2457                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2458                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2459                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2460                         qp_idx += qpnum_per_tc;
2461                 } else
2462                         info->tc_mapping[i] = 0;
2463         }
2464
2465         /* Associate queue number with VSI */
2466         if (vsi->type == I40E_VSI_SRIOV) {
2467                 info->mapping_flags |=
2468                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2469                 for (i = 0; i < vsi->nb_qps; i++)
2470                         info->queue_mapping[i] =
2471                                 rte_cpu_to_le_16(vsi->base_queue + i);
2472         } else {
2473                 info->mapping_flags |=
2474                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2475                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2476         }
2477         info->valid_sections =
2478                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2479
2480         return I40E_SUCCESS;
2481 }
2482
2483 static int
2484 i40e_veb_release(struct i40e_veb *veb)
2485 {
2486         struct i40e_vsi *vsi;
2487         struct i40e_hw *hw;
2488
2489         if (veb == NULL || veb->associate_vsi == NULL)
2490                 return -EINVAL;
2491
2492         if (!TAILQ_EMPTY(&veb->head)) {
2493                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
2494                 return -EACCES;
2495         }
2496
2497         vsi = veb->associate_vsi;
2498         hw = I40E_VSI_TO_HW(vsi);
2499
2500         vsi->uplink_seid = veb->uplink_seid;
2501         i40e_aq_delete_element(hw, veb->seid, NULL);
2502         rte_free(veb);
2503         vsi->veb = NULL;
2504         return I40E_SUCCESS;
2505 }
2506
2507 /* Setup a veb */
2508 static struct i40e_veb *
2509 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2510 {
2511         struct i40e_veb *veb;
2512         int ret;
2513         struct i40e_hw *hw;
2514
2515         if (NULL == pf || vsi == NULL) {
2516                 PMD_DRV_LOG(ERR, "veb setup failed, "
2517                             "associated VSI shouldn't null");
2518                 return NULL;
2519         }
2520         hw = I40E_PF_TO_HW(pf);
2521
2522         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2523         if (!veb) {
2524                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
2525                 goto fail;
2526         }
2527
2528         veb->associate_vsi = vsi;
2529         TAILQ_INIT(&veb->head);
2530         veb->uplink_seid = vsi->uplink_seid;
2531
2532         ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2533                 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2534
2535         if (ret != I40E_SUCCESS) {
2536                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
2537                             hw->aq.asq_last_status);
2538                 goto fail;
2539         }
2540
2541         /* get statistics index */
2542         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2543                                 &veb->stats_idx, NULL, NULL, NULL);
2544         if (ret != I40E_SUCCESS) {
2545                 PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
2546                             hw->aq.asq_last_status);
2547                 goto fail;
2548         }
2549
2550         /* Get VEB bandwidth, to be implemented */
2551         /* Now associated vsi binding to the VEB, set uplink to this VEB */
2552         vsi->uplink_seid = veb->seid;
2553
2554         return veb;
2555 fail:
2556         rte_free(veb);
2557         return NULL;
2558 }
2559
2560 int
2561 i40e_vsi_release(struct i40e_vsi *vsi)
2562 {
2563         struct i40e_pf *pf;
2564         struct i40e_hw *hw;
2565         struct i40e_vsi_list *vsi_list;
2566         int ret;
2567         struct i40e_mac_filter *f;
2568
2569         if (!vsi)
2570                 return I40E_SUCCESS;
2571
2572         pf = I40E_VSI_TO_PF(vsi);
2573         hw = I40E_VSI_TO_HW(vsi);
2574
2575         /* VSI has child to attach, release child first */
2576         if (vsi->veb) {
2577                 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2578                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2579                                 return -1;
2580                         TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2581                 }
2582                 i40e_veb_release(vsi->veb);
2583         }
2584
2585         /* Remove all macvlan filters of the VSI */
2586         i40e_vsi_remove_all_macvlan_filter(vsi);
2587         TAILQ_FOREACH(f, &vsi->mac_list, next)
2588                 rte_free(f);
2589
2590         if (vsi->type != I40E_VSI_MAIN) {
2591                 /* Remove vsi from parent's sibling list */
2592                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2593                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
2594                         return I40E_ERR_PARAM;
2595                 }
2596                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2597                                 &vsi->sib_vsi_list, list);
2598
2599                 /* Remove all switch element of the VSI */
2600                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2601                 if (ret != I40E_SUCCESS)
2602                         PMD_DRV_LOG(ERR, "Failed to delete element");
2603         }
2604         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2605
2606         if (vsi->type != I40E_VSI_SRIOV)
2607                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2608         rte_free(vsi);
2609
2610         return I40E_SUCCESS;
2611 }
2612
2613 static int
2614 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2615 {
2616         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2617         struct i40e_aqc_remove_macvlan_element_data def_filter;
2618         struct i40e_mac_filter_info filter;
2619         int ret;
2620
2621         if (vsi->type != I40E_VSI_MAIN)
2622                 return I40E_ERR_CONFIG;
2623         memset(&def_filter, 0, sizeof(def_filter));
2624         (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2625                                         ETH_ADDR_LEN);
2626         def_filter.vlan_tag = 0;
2627         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2628                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2629         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2630         if (ret != I40E_SUCCESS) {
2631                 struct i40e_mac_filter *f;
2632                 struct ether_addr *mac;
2633
2634                 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2635                             "macvlan filter");
2636                 /* It needs to add the permanent mac into mac list */
2637                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2638                 if (f == NULL) {
2639                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2640                         return I40E_ERR_NO_MEMORY;
2641                 }
2642                 mac = &f->mac_info.mac_addr;
2643                 (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
2644                                 ETH_ADDR_LEN);
2645                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2646                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2647                 vsi->mac_num++;
2648
2649                 return ret;
2650         }
2651         (void)rte_memcpy(&filter.mac_addr,
2652                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
2653         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2654         return i40e_vsi_add_mac(vsi, &filter);
2655 }
2656
2657 static int
2658 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2659 {
2660         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2661         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2662         struct i40e_hw *hw = &vsi->adapter->hw;
2663         i40e_status ret;
2664         int i;
2665
2666         memset(&bw_config, 0, sizeof(bw_config));
2667         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2668         if (ret != I40E_SUCCESS) {
2669                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
2670                             hw->aq.asq_last_status);
2671                 return ret;
2672         }
2673
2674         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2675         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2676                                         &ets_sla_config, NULL);
2677         if (ret != I40E_SUCCESS) {
2678                 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
2679                             "configuration %u", hw->aq.asq_last_status);
2680                 return ret;
2681         }
2682
2683         /* Not store the info yet, just print out */
2684         PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
2685         PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
2686         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2687                 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
2688                             ets_sla_config.share_credits[i]);
2689                 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
2690                             rte_le_to_cpu_16(ets_sla_config.credits[i]));
2691                 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
2692                             rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2693                             (i * 4));
2694         }
2695
2696         return 0;
2697 }
2698
/**
 * Setup a VSI.
 *
 * Allocates and configures a VSI of the given type: reserves queue pairs
 * (and, for non-SRIOV types, one MSIX vector) from the PF pools, builds
 * the VSI context (TC bandwidth, TC/queue mapping, VLAN flags), creates
 * or updates the VSI in firmware, and installs a broadcast MAC filter.
 * For non-main VSIs a VEB is created on the uplink VSI first if needed.
 *
 * @param pf          Physical function owning the VSI.
 * @param type        I40E_VSI_MAIN or I40E_VSI_SRIOV (others rejected).
 * @param uplink_vsi  Parent VSI; must be NULL for MAIN, non-NULL otherwise.
 * @param user_param  For SRIOV: VF index (added to vf_base_id).
 * @return The new VSI, or NULL on any failure (all reservations undone).
 */
struct i40e_vsi *
i40e_vsi_setup(struct i40e_pf *pf,
               enum i40e_vsi_type type,
               struct i40e_vsi *uplink_vsi,
               uint16_t user_param)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_vsi *vsi;
        struct i40e_mac_filter_info filter;
        int ret;
        struct i40e_vsi_context ctxt;
        struct ether_addr broadcast =
                {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};

        if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
                PMD_DRV_LOG(ERR, "VSI setup failed, "
                            "VSI link shouldn't be NULL");
                return NULL;
        }

        if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
                PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
                            "uplink VSI should be NULL");
                return NULL;
        }

        /* If uplink vsi didn't setup VEB, create one first */
        if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
                uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);

                if (NULL == uplink_vsi->veb) {
                        PMD_DRV_LOG(ERR, "VEB setup failed");
                        return NULL;
                }
        }

        vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
                return NULL;
        }
        TAILQ_INIT(&vsi->mac_list);
        vsi->type = type;
        vsi->adapter = I40E_PF_TO_ADAPTER(pf);
        vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
        vsi->parent_vsi = uplink_vsi;
        vsi->user_param = user_param;
        /* Allocate queues */
        switch (vsi->type) {
        case I40E_VSI_MAIN  :
                vsi->nb_qps = pf->lan_nb_qps;
                break;
        case I40E_VSI_SRIOV :
                vsi->nb_qps = pf->vf_nb_qps;
                break;
        default:
                goto fail_mem;
        }
        /* Reserve a contiguous queue range from the PF queue-pair pool */
        ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
                                vsi->seid, ret);
                goto fail_mem;
        }
        vsi->base_queue = ret;

        /* VF has MSIX interrupt in VF range, don't allocate here */
        if (type != I40E_VSI_SRIOV) {
                ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
                        goto fail_queue_alloc;
                }
                vsi->msix_intr = ret;
        } else
                vsi->msix_intr = 0;
        /* Add VSI */
        if (type == I40E_VSI_MAIN) {
                /* For main VSI, no need to add since it's default one */
                vsi->uplink_seid = pf->mac_seid;
                vsi->seid = pf->main_vsi_seid;
                /* Bind queues with specific MSIX interrupt */
                /**
                 * Needs 2 interrupt at least, one for misc cause which will
                 * enabled from OS side, Another for queues binding the
                 * interrupt from device side only.
                 */

                /* Get default VSI parameters from hardware */
                memset(&ctxt, 0, sizeof(ctxt));
                ctxt.seid = vsi->seid;
                ctxt.pf_num = hw->pf_id;
                ctxt.uplink_seid = vsi->uplink_seid;
                ctxt.vf_num = 0;
                ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Failed to get VSI params");
                        goto fail_msix_alloc;
                }
                (void)rte_memcpy(&vsi->info, &ctxt.info,
                        sizeof(struct i40e_aqc_vsi_properties_data));
                vsi->vsi_id = ctxt.vsi_number;
                vsi->info.valid_sections = 0;

                /* Configure tc, enabled TC0 only */
                if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
                        I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
                        goto fail_msix_alloc;
                }

                /* TC, queue mapping */
                memset(&ctxt, 0, sizeof(ctxt));
                vsi->info.valid_sections |=
                        rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
                vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
                                        I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
                (void)rte_memcpy(&ctxt.info, &vsi->info,
                        sizeof(struct i40e_aqc_vsi_properties_data));
                ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
                                                I40E_DEFAULT_TCMAP);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Failed to configure "
                                    "TC queue mapping");
                        goto fail_msix_alloc;
                }
                ctxt.seid = vsi->seid;
                ctxt.pf_num = hw->pf_id;
                ctxt.uplink_seid = vsi->uplink_seid;
                ctxt.vf_num = 0;

                /* Update VSI parameters */
                ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Failed to update VSI params");
                        goto fail_msix_alloc;
                }

                /* Cache the mapping firmware actually applied */
                (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
                                                sizeof(vsi->info.tc_mapping));
                (void)rte_memcpy(&vsi->info.queue_mapping,
                                &ctxt.info.queue_mapping,
                        sizeof(vsi->info.queue_mapping));
                vsi->info.mapping_flags = ctxt.info.mapping_flags;
                vsi->info.valid_sections = 0;

                (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
                                ETH_ADDR_LEN);

                /**
                 * Updating default filter settings are necessary to prevent
                 * reception of tagged packets.
                 * Some old firmware configurations load a default macvlan
                 * filter which accepts both tagged and untagged packets.
                 * The updating is to use a normal filter instead if needed.
                 * For NVM 4.2.2 or after, the updating is not needed anymore.
                 * The firmware with correct configurations load the default
                 * macvlan filter which is expected and cannot be removed.
                 */
                i40e_update_default_filter_setting(vsi);
        } else if (type == I40E_VSI_SRIOV) {
                memset(&ctxt, 0, sizeof(ctxt));
                /**
                 * For other VSI, the uplink_seid equals to uplink VSI's
                 * uplink_seid since they share same VEB
                 */
                vsi->uplink_seid = uplink_vsi->uplink_seid;
                ctxt.pf_num = hw->pf_id;
                ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
                ctxt.uplink_seid = vsi->uplink_seid;
                ctxt.connection_type = 0x1;
                ctxt.flags = I40E_AQ_VSI_TYPE_VF;

                /* Configure switch ID */
                ctxt.info.valid_sections |=
                        rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
                ctxt.info.switch_id =
                        rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
                /* Configure port/vlan */
                ctxt.info.valid_sections |=
                        rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
                ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
                ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
                                                I40E_DEFAULT_TCMAP);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Failed to configure "
                                    "TC queue mapping");
                        goto fail_msix_alloc;
                }
                ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
                ctxt.info.valid_sections |=
                        rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
                /**
                 * Since VSI is not created yet, only configure parameter,
                 * will add vsi below.
                 */
        }
        else {
                PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
                goto fail_msix_alloc;
        }

        /* For non-main types the VSI must still be created in firmware */
        if (vsi->type != I40E_VSI_MAIN) {
                ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
                if (ret) {
                        PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
                                    hw->aq.asq_last_status);
                        goto fail_msix_alloc;
                }
                memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
                vsi->info.valid_sections = 0;
                vsi->seid = ctxt.seid;
                vsi->vsi_id = ctxt.vsi_number;
                /* Link the new VSI into its uplink VEB's child list */
                vsi->sib_vsi_list.vsi = vsi;
                TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
                                &vsi->sib_vsi_list, list);
        }

        /* MAC/VLAN configuration */
        (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
        filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;

        ret = i40e_vsi_add_mac(vsi, &filter);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
                goto fail_msix_alloc;
        }

        /* Get VSI BW information */
        i40e_vsi_dump_bw_config(vsi);
        return vsi;
fail_msix_alloc:
        i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
fail_queue_alloc:
        i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
fail_mem:
        rte_free(vsi);
        return NULL;
}
2939
2940 /* Configure vlan stripping on or off */
2941 int
2942 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
2943 {
2944         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2945         struct i40e_vsi_context ctxt;
2946         uint8_t vlan_flags;
2947         int ret = I40E_SUCCESS;
2948
2949         /* Check if it has been already on or off */
2950         if (vsi->info.valid_sections &
2951                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
2952                 if (on) {
2953                         if ((vsi->info.port_vlan_flags &
2954                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
2955                                 return 0; /* already on */
2956                 } else {
2957                         if ((vsi->info.port_vlan_flags &
2958                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2959                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
2960                                 return 0; /* already off */
2961                 }
2962         }
2963
2964         if (on)
2965                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2966         else
2967                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2968         vsi->info.valid_sections =
2969                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2970         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
2971         vsi->info.port_vlan_flags |= vlan_flags;
2972         ctxt.seid = vsi->seid;
2973         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2974         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2975         if (ret)
2976                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
2977                             on ? "enable" : "disable");
2978
2979         return ret;
2980 }
2981
2982 static int
2983 i40e_dev_init_vlan(struct rte_eth_dev *dev)
2984 {
2985         struct rte_eth_dev_data *data = dev->data;
2986         int ret;
2987
2988         /* Apply vlan offload setting */
2989         i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
2990
2991         /* Apply double-vlan setting, not implemented yet */
2992
2993         /* Apply pvid setting */
2994         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
2995                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
2996         if (ret)
2997                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
2998
2999         return ret;
3000 }
3001
3002 static int
3003 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
3004 {
3005         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3006
3007         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
3008 }
3009
/**
 * Program the flow control registers according to the result of link
 * auto negotiation.  On any failure (no link info, AN not completed)
 * it falls through to write_reg with rxfc = txfc = 0, i.e. flow
 * control disabled.
 */
static int
i40e_update_flow_control(struct i40e_hw *hw)
{
#define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
	struct i40e_link_status link_status;
	/* rxfc/txfc stay 0 on the error paths below -> flow control off */
	uint32_t rxfc = 0, txfc = 0, reg;
	uint8_t an_info;
	int ret;

	memset(&link_status, 0, sizeof(link_status));
	ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to get link status information");
		goto write_reg; /* Disable flow control */
	}

	an_info = hw->phy.link_info.an_info;
	if (!(an_info & I40E_AQ_AN_COMPLETED)) {
		PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
		ret = I40E_ERR_NOT_READY;
		goto write_reg; /* Disable flow control */
	}
	/**
	 * If link auto negotiation is enabled, flow control needs to
	 * be configured according to it
	 */
	switch (an_info & I40E_LINK_PAUSE_RXTX) {
	case I40E_LINK_PAUSE_RXTX:
		rxfc = 1;
		txfc = 1;
		hw->fc.current_mode = I40E_FC_FULL;
		break;
	case I40E_AQ_LINK_PAUSE_RX:
		rxfc = 1;
		hw->fc.current_mode = I40E_FC_RX_PAUSE;
		break;
	case I40E_AQ_LINK_PAUSE_TX:
		txfc = 1;
		hw->fc.current_mode = I40E_FC_TX_PAUSE;
		break;
	default:
		hw->fc.current_mode = I40E_FC_NONE;
		break;
	}

write_reg:
	/* TX pause is enabled through FCCFG, RX pause through MFLCN.RFCE */
	I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
		txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
	reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
	reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
	reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
	I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);

	return ret;
}
3065
/* PF setup: reset stats, read switch config, create the main VSI and
 * configure filter control and flow control.  Returns I40E_SUCCESS or
 * an i40e error code on a fatal failure. */
static int
i40e_pf_setup(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_filter_control_settings settings;
	struct rte_eth_dev_data *dev_data = pf->dev_data;
	struct i40e_vsi *vsi;
	int ret;

	/* Clear all stats counters */
	pf->offset_loaded = FALSE;
	memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));

	ret = i40e_pf_get_switch_config(pf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
		return ret;
	}

	/* VSI setup */
	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Setup of main vsi failed");
		return I40E_ERR_NOT_READY;
	}
	pf->main_vsi = vsi;
	/* Expose the main VSI queue pairs as the device rx/tx queues */
	dev_data->nb_rx_queues = vsi->nb_qps;
	dev_data->nb_tx_queues = vsi->nb_qps;

	/* Configure filter control */
	memset(&settings, 0, sizeof(settings));
	settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
	/* Enable ethtype and macvlan filters */
	settings.enable_ethtype = TRUE;
	settings.enable_macvlan = TRUE;
	ret = i40e_set_filter_control(hw, &settings);
	if (ret)
		/* non-fatal: only a warning is emitted, setup continues */
		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
								ret);

	/* Update flow control according to the auto negotiation */
	/* NOTE(review): return value ignored here — looks like best effort,
	 * confirm a flow-control failure should not fail PF setup */
	i40e_update_flow_control(hw);

	return I40E_SUCCESS;
}
3113
/* Enable or disable a single TX queue in hardware via QTX_ENA.
 * Polls until the enable request bit and status bit agree, then issues
 * the new request and polls again for completion.
 * Returns I40E_SUCCESS, or I40E_ERR_TIMEOUT if the status bit never
 * reaches the requested state. */
int
i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
{
	uint32_t reg;
	uint16_t j;

	/**
	 * Set or clear TX Queue Disable flags,
	 * which is required by hardware.
	 */
	i40e_pre_tx_queue_cfg(hw, q_idx, on);
	rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);

	/* Wait until the request is finished */
	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
		/* done when REQ and STAT bits are equal (no change pending) */
		if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
			((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
							& 0x1))) {
			break;
		}
	}
	if (on) {
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			return I40E_SUCCESS; /* already on, skip next steps */

		/* reset queue head before (re-)enabling */
		I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	} else {
		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
			return I40E_SUCCESS; /* already off, skip next steps */
		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	}
	/* Write the register */
	I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
	/* Check the result */
	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
		if (on) {
			if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
				(reg & I40E_QTX_ENA_QENA_STAT_MASK))
				break;
		} else {
			if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
				!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
				break;
		}
	}
	/* Check if it is timeout */
	if (j >= I40E_CHK_Q_ENA_COUNT) {
		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
			    (on ? "enable" : "disable"), q_idx);
		return I40E_ERR_TIMEOUT;
	}

	return I40E_SUCCESS;
}
3173
3174 /* Swith on or off the tx queues */
3175 static int
3176 i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
3177 {
3178         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3179         struct i40e_tx_queue *txq;
3180         struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3181         uint16_t i;
3182         int ret;
3183
3184         for (i = 0; i < dev_data->nb_tx_queues; i++) {
3185                 txq = dev_data->tx_queues[i];
3186                 /* Don't operate the queue if not configured or
3187                  * if starting only per queue */
3188                 if (!txq->q_set || (on && txq->tx_deferred_start))
3189                         continue;
3190                 if (on)
3191                         ret = i40e_dev_tx_queue_start(dev, i);
3192                 else
3193                         ret = i40e_dev_tx_queue_stop(dev, i);
3194                 if ( ret != I40E_SUCCESS)
3195                         return ret;
3196         }
3197
3198         return I40E_SUCCESS;
3199 }
3200
/* Enable or disable a single RX queue in hardware via QRX_ENA.
 * Mirrors i40e_switch_tx_queue(): wait for any pending request to
 * settle, apply the new request, then poll for completion.
 * Returns I40E_SUCCESS or I40E_ERR_TIMEOUT. */
int
i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
{
	uint32_t reg;
	uint16_t j;

	/* Wait until the request is finished */
	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
		/* (!req) ^ stat is true exactly when req == stat,
		 * i.e. no state change is pending */
		if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
			((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
			break;
	}

	if (on) {
		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
			return I40E_SUCCESS; /* Already on, skip next steps */
		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
	} else {
		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
			return I40E_SUCCESS; /* Already off, skip next steps */
		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
	}

	/* Write the register */
	I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
	/* Check the result */
	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
		if (on) {
			if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
				(reg & I40E_QRX_ENA_QENA_STAT_MASK))
				break;
		} else {
			if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
				!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
				break;
		}
	}

	/* Check if it is timeout */
	if (j >= I40E_CHK_Q_ENA_COUNT) {
		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
			    (on ? "enable" : "disable"), q_idx);
		return I40E_ERR_TIMEOUT;
	}

	return I40E_SUCCESS;
}
3252 /* Switch on or off the rx queues */
3253 static int
3254 i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
3255 {
3256         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3257         struct i40e_rx_queue *rxq;
3258         struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3259         uint16_t i;
3260         int ret;
3261
3262         for (i = 0; i < dev_data->nb_rx_queues; i++) {
3263                 rxq = dev_data->rx_queues[i];
3264                 /* Don't operate the queue if not configured or
3265                  * if starting only per queue */
3266                 if (!rxq->q_set || (on && rxq->rx_deferred_start))
3267                         continue;
3268                 if (on)
3269                         ret = i40e_dev_rx_queue_start(dev, i);
3270                 else
3271                         ret = i40e_dev_rx_queue_stop(dev, i);
3272                 if (ret != I40E_SUCCESS)
3273                         return ret;
3274         }
3275
3276         return I40E_SUCCESS;
3277 }
3278
3279 /* Switch on or off all the rx/tx queues */
3280 int
3281 i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
3282 {
3283         int ret;
3284
3285         if (on) {
3286                 /* enable rx queues before enabling tx queues */
3287                 ret = i40e_vsi_switch_rx_queues(vsi, on);
3288                 if (ret) {
3289                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
3290                         return ret;
3291                 }
3292                 ret = i40e_vsi_switch_tx_queues(vsi, on);
3293         } else {
3294                 /* Stop tx queues before stopping rx queues */
3295                 ret = i40e_vsi_switch_tx_queues(vsi, on);
3296                 if (ret) {
3297                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
3298                         return ret;
3299                 }
3300                 ret = i40e_vsi_switch_rx_queues(vsi, on);
3301         }
3302
3303         return ret;
3304 }
3305
3306 /* Initialize VSI for TX */
3307 static int
3308 i40e_vsi_tx_init(struct i40e_vsi *vsi)
3309 {
3310         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3311         struct rte_eth_dev_data *data = pf->dev_data;
3312         uint16_t i;
3313         uint32_t ret = I40E_SUCCESS;
3314
3315         for (i = 0; i < data->nb_tx_queues; i++) {
3316                 ret = i40e_tx_queue_init(data->tx_queues[i]);
3317                 if (ret != I40E_SUCCESS)
3318                         break;
3319         }
3320
3321         return ret;
3322 }
3323
3324 /* Initialize VSI for RX */
3325 static int
3326 i40e_vsi_rx_init(struct i40e_vsi *vsi)
3327 {
3328         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3329         struct rte_eth_dev_data *data = pf->dev_data;
3330         int ret = I40E_SUCCESS;
3331         uint16_t i;
3332
3333         i40e_pf_config_mq_rx(pf);
3334         for (i = 0; i < data->nb_rx_queues; i++) {
3335                 ret = i40e_rx_queue_init(data->rx_queues[i]);
3336                 if (ret != I40E_SUCCESS) {
3337                         PMD_DRV_LOG(ERR, "Failed to do RX queue "
3338                                     "initialization");
3339                         break;
3340                 }
3341         }
3342
3343         return ret;
3344 }
3345
3346 /* Initialize VSI */
3347 static int
3348 i40e_vsi_init(struct i40e_vsi *vsi)
3349 {
3350         int err;
3351
3352         err = i40e_vsi_tx_init(vsi);
3353         if (err) {
3354                 PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization");
3355                 return err;
3356         }
3357         err = i40e_vsi_rx_init(vsi);
3358         if (err) {
3359                 PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization");
3360                 return err;
3361         }
3362
3363         return err;
3364 }
3365
3366 static void
3367 i40e_stat_update_32(struct i40e_hw *hw,
3368                    uint32_t reg,
3369                    bool offset_loaded,
3370                    uint64_t *offset,
3371                    uint64_t *stat)
3372 {
3373         uint64_t new_data;
3374
3375         new_data = (uint64_t)I40E_READ_REG(hw, reg);
3376         if (!offset_loaded)
3377                 *offset = new_data;
3378
3379         if (new_data >= *offset)
3380                 *stat = (uint64_t)(new_data - *offset);
3381         else
3382                 *stat = (uint64_t)((new_data +
3383                         ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
3384 }
3385
3386 static void
3387 i40e_stat_update_48(struct i40e_hw *hw,
3388                    uint32_t hireg,
3389                    uint32_t loreg,
3390                    bool offset_loaded,
3391                    uint64_t *offset,
3392                    uint64_t *stat)
3393 {
3394         uint64_t new_data;
3395
3396         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3397         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3398                         I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
3399
3400         if (!offset_loaded)
3401                 *offset = new_data;
3402
3403         if (new_data >= *offset)
3404                 *stat = new_data - *offset;
3405         else
3406                 *stat = (uint64_t)((new_data +
3407                         ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
3408
3409         *stat &= I40E_48_BIT_MASK;
3410 }
3411
/* Disable IRQ0 */
void
i40e_pf_disable_irq0(struct i40e_hw *hw)
{
	/* Disable all interrupt types */
	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
	/* flush so the write takes effect before returning */
	I40E_WRITE_FLUSH(hw);
}
3420
/* Enable IRQ0 */
void
i40e_pf_enable_irq0(struct i40e_hw *hw)
{
	/* enable the cause, clear any pending event (CLEARPBA) and
	 * select the ITR index */
	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
		I40E_PFINT_DYN_CTL0_INTENA_MASK |
		I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
		I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
	/* flush so the write takes effect before returning */
	I40E_WRITE_FLUSH(hw);
}
3431
/* Configure interrupt cause 0 (admin queue / misc causes) for the PF */
static void
i40e_pf_config_irq0(struct i40e_hw *hw)
{
	/* read pending request and disable first */
	i40e_pf_disable_irq0(hw);
	/* enable all causes on ICR0 */
	I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
	I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
		I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);

	/* Link no queues with irq0 */
	I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
		I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
}
3445
/* Scan GLGEN_VFLRSTAT for VFs that triggered a reset (VFLR), clear the
 * event bit and notify the VF reset handler for each one found. */
static void
i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int i;
	uint16_t abs_vf_id;
	uint32_t index, offset, val;

	/* nothing to do when no VFs are allocated */
	if (!pf->vfs)
		return;
	/**
	 * Try to find which VF trigger a reset, use absolute VF id to access
	 * since the reg is global register.
	 */
	for (i = 0; i < pf->vf_num; i++) {
		abs_vf_id = hw->func_caps.vf_base_id + i;
		/* each 32-bit status register covers 32 VFs */
		index = abs_vf_id / I40E_UINT32_BIT_SIZE;
		offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
		val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
		/* VFR event occured */
		if (val & (0x1 << offset)) {
			int ret;

			/* Clear the event first */
			I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
							(0x1 << offset));
			PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
			/**
			 * Only notify a VF reset event occured,
			 * don't trigger another SW reset
			 */
			ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
			if (ret != I40E_SUCCESS)
				PMD_DRV_LOG(ERR, "Failed to do VF reset");
		}
	}
}
3484
/* Drain the admin receive queue (ARQ) and dispatch each event.
 * Currently only VF-to-PF messages are handled; anything else is
 * logged and dropped. */
static void
i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_arq_event_info info;
	uint16_t pending, opcode;
	int ret;

	/* one reusable buffer for all events in this drain pass */
	info.buf_len = I40E_AQ_BUF_SZ;
	info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
	if (!info.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	pending = 1;
	while (pending) {
		/* pending is updated with the number of events still queued */
		ret = i40e_clean_arq_element(hw, &info, &pending);

		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
				    "aq_err: %u", hw->aq.asq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(info.desc.opcode);

		switch (opcode) {
		case i40e_aqc_opc_send_msg_to_pf:
			/* Refer to i40e_aq_send_msg_to_pf() for argument layout*/
			i40e_pf_host_handle_vf_msg(dev,
					rte_le_to_cpu_16(info.desc.retval),
					rte_le_to_cpu_32(info.desc.cookie_high),
					rte_le_to_cpu_32(info.desc.cookie_low),
					info.msg_buf,
					info.msg_len);
			break;
		default:
			PMD_DRV_LOG(ERR, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(info.msg_buf);
}
3529
3530 /*
3531  * Interrupt handler is registered as the alarm callback for handling LSC
3532  * interrupt in a definite of time, in order to wait the NIC into a stable
3533  * state. Currently it waits 1 sec in i40e for the link up interrupt, and
3534  * no need for link down interrupt.
3535  */
3536 static void
3537 i40e_dev_interrupt_delayed_handler(void *param)
3538 {
3539         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3540         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3541         uint32_t icr0;
3542
3543         /* read interrupt causes again */
3544         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3545
3546 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
3547         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
3548                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
3549         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3550                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
3551         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
3552                 PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
3553         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3554                 PMD_DRV_LOG(INFO, "ICR0: PCI exception\n activated\n");
3555         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
3556                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
3557                                                                 "state\n");
3558         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
3559                 PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
3560         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
3561                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
3562 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
3563
3564         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3565                 PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
3566                 i40e_dev_handle_vfr_event(dev);
3567         }
3568         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3569                 PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
3570                 i40e_dev_handle_aq_msg(dev);
3571         }
3572
3573         /* handle the link up interrupt in an alarm callback */
3574         i40e_dev_link_update(dev, 0);
3575         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
3576
3577         i40e_pf_enable_irq0(hw);
3578         rte_intr_enable(&(dev->pci_dev->intr_handle));
3579 }
3580
3581 /**
3582  * Interrupt handler triggered by NIC  for handling
3583  * specific interrupt.
3584  *
3585  * @param handle
3586  *  Pointer to interrupt handle.
3587  * @param param
3588  *  The address of parameter (struct rte_eth_dev *) regsitered before.
3589  *
3590  * @return
3591  *  void
3592  */
3593 static void
3594 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3595                            void *param)
3596 {
3597         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3598         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3599         uint32_t icr0;
3600
3601         /* Disable interrupt */
3602         i40e_pf_disable_irq0(hw);
3603
3604         /* read out interrupt causes */
3605         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3606
3607         /* No interrupt event indicated */
3608         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
3609                 PMD_DRV_LOG(INFO, "No interrupt event");
3610                 goto done;
3611         }
3612 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
3613         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
3614                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
3615         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3616                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
3617         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
3618                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
3619         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3620                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
3621         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
3622                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
3623         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
3624                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
3625         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
3626                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
3627 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
3628
3629         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3630                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
3631                 i40e_dev_handle_vfr_event(dev);
3632         }
3633         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3634                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
3635                 i40e_dev_handle_aq_msg(dev);
3636         }
3637
3638         /* Link Status Change interrupt */
3639         if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
3640 #define I40E_US_PER_SECOND 1000000
3641                 struct rte_eth_link link;
3642
3643                 PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
3644                 memset(&link, 0, sizeof(link));
3645                 rte_i40e_dev_atomic_read_link_status(dev, &link);
3646                 i40e_dev_link_update(dev, 0);
3647
3648                 /*
3649                  * For link up interrupt, it needs to wait 1 second to let the
3650                  * hardware be a stable state. Otherwise several consecutive
3651                  * interrupts can be observed.
3652                  * For link down interrupt, no need to wait.
3653                  */
3654                 if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
3655                         i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
3656                         return;
3657                 else
3658                         _rte_eth_dev_callback_process(dev,
3659                                 RTE_ETH_EVENT_INTR_LSC);
3660         }
3661
3662 done:
3663         /* Enable interrupt */
3664         i40e_pf_enable_irq0(hw);
3665         rte_intr_enable(&(dev->pci_dev->intr_handle));
3666 }
3667
/* Add a list of macvlan filters to the VSI through the admin queue.
 * The request is chunked so each AQ command fits the ASQ buffer size.
 * Returns I40E_SUCCESS or an i40e error code. */
static int
i40e_add_macvlan_filters(struct i40e_vsi *vsi,
			 struct i40e_macvlan_filter *filter,
			 int total)
{
	int ele_num, ele_buff_size;
	int num, actual_num, i;
	uint16_t flags;
	int ret = I40E_SUCCESS;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	struct i40e_aqc_add_macvlan_element_data *req_list;

	if (filter == NULL  || total == 0)
		return I40E_ERR_PARAM;
	/* max elements per AQ command, bounded by the ASQ buffer */
	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
	ele_buff_size = hw->aq.asq_buf_size;

	req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
	if (req_list == NULL) {
		PMD_DRV_LOG(ERR, "Fail to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	num = 0;
	do {
		/* last chunk may be shorter than ele_num */
		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
		memset(req_list, 0, ele_buff_size);

		for (i = 0; i < actual_num; i++) {
			(void)rte_memcpy(req_list[i].mac_addr,
				&filter[num + i].macaddr, ETH_ADDR_LEN);
			req_list[i].vlan_tag =
				rte_cpu_to_le_16(filter[num + i].vlan_id);

			/* map the PMD filter type onto AQ match flags */
			switch (filter[num + i].filter_type) {
			case RTE_MAC_PERFECT_MATCH:
				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
				break;
			case RTE_MACVLAN_PERFECT_MATCH:
				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
				break;
			case RTE_MAC_HASH_MATCH:
				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
				break;
			case RTE_MACVLAN_HASH_MATCH:
				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
				break;
			default:
				PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
				ret = I40E_ERR_PARAM;
				goto DONE;
			}

			req_list[i].queue_number = 0;

			req_list[i].flags = rte_cpu_to_le_16(flags);
		}

		ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
						actual_num, NULL);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
			goto DONE;
		}
		num += actual_num;
	} while (num < total);

DONE:
	rte_free(req_list);
	return ret;
}
3741
/* Remove a list of macvlan filters from the VSI through the admin
 * queue.  Mirrors i40e_add_macvlan_filters(): requests are chunked to
 * the ASQ buffer size.  Returns I40E_SUCCESS or an i40e error code. */
static int
i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
			    struct i40e_macvlan_filter *filter,
			    int total)
{
	int ele_num, ele_buff_size;
	int num, actual_num, i;
	uint16_t flags;
	int ret = I40E_SUCCESS;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	struct i40e_aqc_remove_macvlan_element_data *req_list;

	if (filter == NULL  || total == 0)
		return I40E_ERR_PARAM;

	/* max elements per AQ command, bounded by the ASQ buffer */
	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
	ele_buff_size = hw->aq.asq_buf_size;

	req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
	if (req_list == NULL) {
		PMD_DRV_LOG(ERR, "Fail to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	num = 0;
	do {
		/* last chunk may be shorter than ele_num */
		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
		memset(req_list, 0, ele_buff_size);

		for (i = 0; i < actual_num; i++) {
			(void)rte_memcpy(req_list[i].mac_addr,
				&filter[num + i].macaddr, ETH_ADDR_LEN);
			req_list[i].vlan_tag =
				rte_cpu_to_le_16(filter[num + i].vlan_id);

			/* map the PMD filter type onto AQ match flags */
			switch (filter[num + i].filter_type) {
			case RTE_MAC_PERFECT_MATCH:
				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
				break;
			case RTE_MACVLAN_PERFECT_MATCH:
				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
				break;
			case RTE_MAC_HASH_MATCH:
				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
				break;
			case RTE_MACVLAN_HASH_MATCH:
				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
				break;
			default:
				PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
				ret = I40E_ERR_PARAM;
				goto DONE;
			}
			req_list[i].flags = rte_cpu_to_le_16(flags);
		}

		ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
						actual_num, NULL);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
			goto DONE;
		}
		num += actual_num;
	} while (num < total);

DONE:
	rte_free(req_list);
	return ret;
}
3813
3814 /* Find out specific MAC filter */
3815 static struct i40e_mac_filter *
3816 i40e_find_mac_filter(struct i40e_vsi *vsi,
3817                          struct ether_addr *macaddr)
3818 {
3819         struct i40e_mac_filter *f;
3820
3821         TAILQ_FOREACH(f, &vsi->mac_list, next) {
3822                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
3823                         return f;
3824         }
3825
3826         return NULL;
3827 }
3828
3829 static bool
3830 i40e_find_vlan_filter(struct i40e_vsi *vsi,
3831                          uint16_t vlan_id)
3832 {
3833         uint32_t vid_idx, vid_bit;
3834
3835         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3836         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3837
3838         if (vsi->vfta[vid_idx] & vid_bit)
3839                 return 1;
3840         else
3841                 return 0;
3842 }
3843
3844 static void
3845 i40e_set_vlan_filter(struct i40e_vsi *vsi,
3846                          uint16_t vlan_id, bool on)
3847 {
3848         uint32_t vid_idx, vid_bit;
3849
3850 #define UINT32_BIT_MASK      0x1F
3851 #define VALID_VLAN_BIT_MASK  0xFFF
3852         /* VFTA is 32-bits size array, each element contains 32 vlan bits, Find the
3853          *  element first, then find the bits it belongs to
3854          */
3855         vid_idx = (uint32_t) ((vlan_id & VALID_VLAN_BIT_MASK) >>
3856                   sizeof(uint32_t));
3857         vid_bit = (uint32_t) (1 << (vlan_id & UINT32_BIT_MASK));
3858
3859         if (on)
3860                 vsi->vfta[vid_idx] |= vid_bit;
3861         else
3862                 vsi->vfta[vid_idx] &= ~vid_bit;
3863 }
3864
3865 /**
3866  * Find all vlan options for specific mac addr,
3867  * return with actual vlan found.
3868  */
3869 static inline int
3870 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
3871                            struct i40e_macvlan_filter *mv_f,
3872                            int num, struct ether_addr *addr)
3873 {
3874         int i;
3875         uint32_t j, k;
3876
3877         /**
3878          * Not to use i40e_find_vlan_filter to decrease the loop time,
3879          * although the code looks complex.
3880           */
3881         if (num < vsi->vlan_num)
3882                 return I40E_ERR_PARAM;
3883
3884         i = 0;
3885         for (j = 0; j < I40E_VFTA_SIZE; j++) {
3886                 if (vsi->vfta[j]) {
3887                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
3888                                 if (vsi->vfta[j] & (1 << k)) {
3889                                         if (i > num - 1) {
3890                                                 PMD_DRV_LOG(ERR, "vlan number "
3891                                                             "not match");
3892                                                 return I40E_ERR_PARAM;
3893                                         }
3894                                         (void)rte_memcpy(&mv_f[i].macaddr,
3895                                                         addr, ETH_ADDR_LEN);
3896                                         mv_f[i].vlan_id =
3897                                                 j * I40E_UINT32_BIT_SIZE + k;
3898                                         i++;
3899                                 }
3900                         }
3901                 }
3902         }
3903         return I40E_SUCCESS;
3904 }
3905
3906 static inline int
3907 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
3908                            struct i40e_macvlan_filter *mv_f,
3909                            int num,
3910                            uint16_t vlan)
3911 {
3912         int i = 0;
3913         struct i40e_mac_filter *f;
3914
3915         if (num < vsi->mac_num)
3916                 return I40E_ERR_PARAM;
3917
3918         TAILQ_FOREACH(f, &vsi->mac_list, next) {
3919                 if (i > num - 1) {
3920                         PMD_DRV_LOG(ERR, "buffer number not match");
3921                         return I40E_ERR_PARAM;
3922                 }
3923                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
3924                                 ETH_ADDR_LEN);
3925                 mv_f[i].vlan_id = vlan;
3926                 mv_f[i].filter_type = f->mac_info.filter_type;
3927                 i++;
3928         }
3929
3930         return I40E_SUCCESS;
3931 }
3932
3933 static int
3934 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
3935 {
3936         int i, num;
3937         struct i40e_mac_filter *f;
3938         struct i40e_macvlan_filter *mv_f;
3939         int ret = I40E_SUCCESS;
3940
3941         if (vsi == NULL || vsi->mac_num == 0)
3942                 return I40E_ERR_PARAM;
3943
3944         /* Case that no vlan is set */
3945         if (vsi->vlan_num == 0)
3946                 num = vsi->mac_num;
3947         else
3948                 num = vsi->mac_num * vsi->vlan_num;
3949
3950         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
3951         if (mv_f == NULL) {
3952                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3953                 return I40E_ERR_NO_MEMORY;
3954         }
3955
3956         i = 0;
3957         if (vsi->vlan_num == 0) {
3958                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3959                         (void)rte_memcpy(&mv_f[i].macaddr,
3960                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
3961                         mv_f[i].vlan_id = 0;
3962                         i++;
3963                 }
3964         } else {
3965                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3966                         ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
3967                                         vsi->vlan_num, &f->mac_info.mac_addr);
3968                         if (ret != I40E_SUCCESS)
3969                                 goto DONE;
3970                         i += vsi->vlan_num;
3971                 }
3972         }
3973
3974         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
3975 DONE:
3976         rte_free(mv_f);
3977
3978         return ret;
3979 }
3980
3981 int
3982 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3983 {
3984         struct i40e_macvlan_filter *mv_f;
3985         int mac_num;
3986         int ret = I40E_SUCCESS;
3987
3988         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
3989                 return I40E_ERR_PARAM;
3990
3991         /* If it's already set, just return */
3992         if (i40e_find_vlan_filter(vsi,vlan))
3993                 return I40E_SUCCESS;
3994
3995         mac_num = vsi->mac_num;
3996
3997         if (mac_num == 0) {
3998                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
3999                 return I40E_ERR_PARAM;
4000         }
4001
4002         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
4003
4004         if (mv_f == NULL) {
4005                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4006                 return I40E_ERR_NO_MEMORY;
4007         }
4008
4009         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
4010
4011         if (ret != I40E_SUCCESS)
4012                 goto DONE;
4013
4014         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
4015
4016         if (ret != I40E_SUCCESS)
4017                 goto DONE;
4018
4019         i40e_set_vlan_filter(vsi, vlan, 1);
4020
4021         vsi->vlan_num++;
4022         ret = I40E_SUCCESS;
4023 DONE:
4024         rte_free(mv_f);
4025         return ret;
4026 }
4027
/**
 * Remove @vlan from every MAC address configured on the VSI.
 *
 * When the last vlan is removed, all MAC filters are re-added with
 * vlan 0 so untagged traffic keeps flowing. Returns I40E_SUCCESS or
 * an I40E_ERR_* code (bad parameter, allocation or AQ failure).
 */
int
i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
{
	struct i40e_macvlan_filter *mv_f;
	int mac_num;
	int ret = I40E_SUCCESS;

	/**
	 * Vlan 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
		return I40E_ERR_PARAM;

	/* If can't find it, just return */
	if (!i40e_find_vlan_filter(vsi, vlan))
		return I40E_ERR_PARAM;

	mac_num = vsi->mac_num;

	if (mac_num == 0) {
		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
		return I40E_ERR_PARAM;
	}

	/* One (mac, vlan) removal entry per configured MAC address */
	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);

	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);

	if (ret != I40E_SUCCESS)
		goto DONE;

	ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);

	if (ret != I40E_SUCCESS)
		goto DONE;

	/* This is last vlan to remove, replace all mac filter with vlan 0 */
	if (vsi->vlan_num == 1) {
		/* Reuse mv_f: same MACs, vlan id rewritten to 0 */
		ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
		if (ret != I40E_SUCCESS)
			goto DONE;

		ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
		if (ret != I40E_SUCCESS)
			goto DONE;
	}

	/* Update software shadow state only after hardware succeeded */
	i40e_set_vlan_filter(vsi, vlan, 0);

	vsi->vlan_num--;
	ret = I40E_SUCCESS;
DONE:
	rte_free(mv_f);
	return ret;
}
4089
4090 int
4091 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
4092 {
4093         struct i40e_mac_filter *f;
4094         struct i40e_macvlan_filter *mv_f;
4095         int i, vlan_num = 0;
4096         int ret = I40E_SUCCESS;
4097
4098         /* If it's add and we've config it, return */
4099         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
4100         if (f != NULL)
4101                 return I40E_SUCCESS;
4102         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
4103                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
4104
4105                 /**
4106                  * If vlan_num is 0, that's the first time to add mac,
4107                  * set mask for vlan_id 0.
4108                  */
4109                 if (vsi->vlan_num == 0) {
4110                         i40e_set_vlan_filter(vsi, 0, 1);
4111                         vsi->vlan_num = 1;
4112                 }
4113                 vlan_num = vsi->vlan_num;
4114         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
4115                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
4116                 vlan_num = 1;
4117
4118         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4119         if (mv_f == NULL) {
4120                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4121                 return I40E_ERR_NO_MEMORY;
4122         }
4123
4124         for (i = 0; i < vlan_num; i++) {
4125                 mv_f[i].filter_type = mac_filter->filter_type;
4126                 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
4127                                 ETH_ADDR_LEN);
4128         }
4129
4130         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4131                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
4132                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
4133                                         &mac_filter->mac_addr);
4134                 if (ret != I40E_SUCCESS)
4135                         goto DONE;
4136         }
4137
4138         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
4139         if (ret != I40E_SUCCESS)
4140                 goto DONE;
4141
4142         /* Add the mac addr into mac list */
4143         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4144         if (f == NULL) {
4145                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4146                 ret = I40E_ERR_NO_MEMORY;
4147                 goto DONE;
4148         }
4149         (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
4150                         ETH_ADDR_LEN);
4151         f->mac_info.filter_type = mac_filter->filter_type;
4152         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4153         vsi->mac_num++;
4154
4155         ret = I40E_SUCCESS;
4156 DONE:
4157         rte_free(mv_f);
4158
4159         return ret;
4160 }
4161
/**
 * Remove one MAC address (and all of its vlan pairings) from the VSI,
 * in hardware and from the software mac_list.
 *
 * Returns I40E_SUCCESS, I40E_ERR_PARAM when the address is unknown or
 * the filter state is inconsistent, or an allocation/AQ error code.
 */
int
i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num;
	enum rte_mac_filter_type filter_type;
	int ret = I40E_SUCCESS;

	/* Can't find it, return an error */
	f = i40e_find_mac_filter(vsi, addr);
	if (f == NULL)
		return I40E_ERR_PARAM;

	vlan_num = vsi->vlan_num;
	filter_type = f->mac_info.filter_type;
	/* vlan-qualified filters need one removal entry per vlan;
	 * vlan-agnostic filters need exactly one entry. */
	if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		filter_type == RTE_MACVLAN_HASH_MATCH) {
		if (vlan_num == 0) {
			PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
			return I40E_ERR_PARAM;
		}
	} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
			filter_type == RTE_MAC_HASH_MATCH)
		vlan_num = 1;

	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	/* Seed every entry with the MAC and its filter type ... */
	for (i = 0; i < vlan_num; i++) {
		mv_f[i].filter_type = filter_type;
		(void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
				ETH_ADDR_LEN);
	}
	/* ... then fill in the vlan ids for vlan-qualified filters */
	if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
			filter_type == RTE_MACVLAN_HASH_MATCH) {
		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
		if (ret != I40E_SUCCESS)
			goto DONE;
	}

	ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
	if (ret != I40E_SUCCESS)
		goto DONE;

	/* Remove the mac addr into mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = I40E_SUCCESS;
DONE:
	rte_free(mv_f);
	return ret;
}
4220
4221 /* Configure hash enable flags for RSS */
4222 uint64_t
4223 i40e_config_hena(uint64_t flags)
4224 {
4225         uint64_t hena = 0;
4226
4227         if (!flags)
4228                 return hena;
4229
4230         if (flags & ETH_RSS_NONF_IPV4_UDP)
4231                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
4232         if (flags & ETH_RSS_NONF_IPV4_TCP)
4233                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
4234         if (flags & ETH_RSS_NONF_IPV4_SCTP)
4235                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
4236         if (flags & ETH_RSS_NONF_IPV4_OTHER)
4237                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
4238         if (flags & ETH_RSS_FRAG_IPV4)
4239                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
4240         if (flags & ETH_RSS_NONF_IPV6_UDP)
4241                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
4242         if (flags & ETH_RSS_NONF_IPV6_TCP)
4243                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
4244         if (flags & ETH_RSS_NONF_IPV6_SCTP)
4245                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
4246         if (flags & ETH_RSS_NONF_IPV6_OTHER)
4247                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
4248         if (flags & ETH_RSS_FRAG_IPV6)
4249                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
4250         if (flags & ETH_RSS_L2_PAYLOAD)
4251                 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
4252
4253         return hena;
4254 }
4255
4256 /* Parse the hash enable flags */
4257 uint64_t
4258 i40e_parse_hena(uint64_t flags)
4259 {
4260         uint64_t rss_hf = 0;
4261
4262         if (!flags)
4263                 return rss_hf;
4264
4265         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
4266                 rss_hf |= ETH_RSS_NONF_IPV4_UDP;
4267         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
4268                 rss_hf |= ETH_RSS_NONF_IPV4_TCP;
4269         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
4270                 rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
4271         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
4272                 rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
4273         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
4274                 rss_hf |= ETH_RSS_FRAG_IPV4;
4275         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
4276                 rss_hf |= ETH_RSS_NONF_IPV6_UDP;
4277         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
4278                 rss_hf |= ETH_RSS_NONF_IPV6_TCP;
4279         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
4280                 rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
4281         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
4282                 rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
4283         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
4284                 rss_hf |= ETH_RSS_FRAG_IPV6;
4285         if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
4286                 rss_hf |= ETH_RSS_L2_PAYLOAD;
4287
4288         return rss_hf;
4289 }
4290
/* Disable RSS */
static void
i40e_pf_disable_rss(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint64_t hena;

	/* The 64-bit hash-enable mask lives in two 32-bit registers */
	hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
	/* Read-modify-write: clear only the RSS-related bits */
	hena &= ~I40E_RSS_HENA_ALL;
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
	I40E_WRITE_FLUSH(hw);
}
4305
/*
 * Program the RSS hash key (only when the caller supplies one long
 * enough to fill all HKEY registers) and the hash-enable mask derived
 * from rss_conf->rss_hf. Always returns 0.
 */
static int
i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
{
	uint32_t *hash_key;
	uint8_t hash_key_len;
	uint64_t rss_hf;
	uint16_t i;
	uint64_t hena;

	hash_key = (uint32_t *)(rss_conf->rss_key);
	hash_key_len = rss_conf->rss_key_len;
	/* A short key is silently ignored; the current key is kept */
	if (hash_key != NULL && hash_key_len >=
		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Fill in RSS hash key */
		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
	}

	rss_hf = rss_conf->rss_hf;
	/* Read-modify-write the 64-bit mask split across two registers,
	 * preserving any non-RSS bits already set. */
	hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
	hena &= ~I40E_RSS_HENA_ALL;
	hena |= i40e_config_hena(rss_hf);
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
	I40E_WRITE_FLUSH(hw);

	return 0;
}
4335
4336 static int
4337 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
4338                          struct rte_eth_rss_conf *rss_conf)
4339 {
4340         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4341         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
4342         uint64_t hena;
4343
4344         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4345         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4346         if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
4347                 if (rss_hf != 0) /* Enable RSS */
4348                         return -EINVAL;
4349                 return 0; /* Nothing to do */
4350         }
4351         /* RSS enabled */
4352         if (rss_hf == 0) /* Disable RSS */
4353                 return -EINVAL;
4354
4355         return i40e_hw_rss_hash_set(hw, rss_conf);
4356 }
4357
4358 static int
4359 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
4360                            struct rte_eth_rss_conf *rss_conf)
4361 {
4362         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4363         uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
4364         uint64_t hena;
4365         uint16_t i;
4366
4367         if (hash_key != NULL) {
4368                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4369                         hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
4370                 rss_conf->rss_key_len = i * sizeof(uint32_t);
4371         }
4372         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4373         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4374         rss_conf->rss_hf = i40e_parse_hena(hena);
4375
4376         return 0;
4377 }
4378
4379 static int
4380 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
4381 {
4382         switch (filter_type) {
4383         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
4384                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
4385                 break;
4386         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
4387                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
4388                 break;
4389         case RTE_TUNNEL_FILTER_IMAC_TENID:
4390                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
4391                 break;
4392         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
4393                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
4394                 break;
4395         case ETH_TUNNEL_FILTER_IMAC:
4396                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
4397                 break;
4398         default:
4399                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
4400                 return -EINVAL;
4401         }
4402
4403         return 0;
4404 }
4405
4406 static int
4407 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
4408                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
4409                         uint8_t add)
4410 {
4411         uint16_t ip_type;
4412         uint8_t tun_type = 0;
4413         int val, ret = 0;
4414         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4415         struct i40e_vsi *vsi = pf->main_vsi;
4416         struct i40e_aqc_add_remove_cloud_filters_element_data  *cld_filter;
4417         struct i40e_aqc_add_remove_cloud_filters_element_data  *pfilter;
4418
4419         cld_filter = rte_zmalloc("tunnel_filter",
4420                 sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
4421                 0);
4422
4423         if (NULL == cld_filter) {
4424                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
4425                 return -EINVAL;
4426         }
4427         pfilter = cld_filter;
4428
4429         (void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
4430                         sizeof(struct ether_addr));
4431         (void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
4432                         sizeof(struct ether_addr));
4433
4434         pfilter->inner_vlan = tunnel_filter->inner_vlan;
4435         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
4436                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
4437                 (void)rte_memcpy(&pfilter->ipaddr.v4.data,
4438                                 &tunnel_filter->ip_addr,
4439                                 sizeof(pfilter->ipaddr.v4.data));
4440         } else {
4441                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
4442                 (void)rte_memcpy(&pfilter->ipaddr.v6.data,
4443                                 &tunnel_filter->ip_addr,
4444                                 sizeof(pfilter->ipaddr.v6.data));
4445         }
4446
4447         /* check tunneled type */
4448         switch (tunnel_filter->tunnel_type) {
4449         case RTE_TUNNEL_TYPE_VXLAN:
4450                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
4451                 break;
4452         default:
4453                 /* Other tunnel types is not supported. */
4454                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
4455                 rte_free(cld_filter);
4456                 return -EINVAL;
4457         }
4458
4459         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
4460                                                 &pfilter->flags);
4461         if (val < 0) {
4462                 rte_free(cld_filter);
4463                 return -EINVAL;
4464         }
4465
4466         pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
4467                 (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
4468         pfilter->tenant_id = tunnel_filter->tenant_id;
4469         pfilter->queue_number = tunnel_filter->queue_id;
4470
4471         if (add)
4472                 ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
4473         else
4474                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
4475                                                 cld_filter, 1);
4476
4477         rte_free(cld_filter);
4478         return ret;
4479 }
4480
4481 static int
4482 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
4483 {
4484         uint8_t i;
4485
4486         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
4487                 if (pf->vxlan_ports[i] == port)
4488                         return i;
4489         }
4490
4491         return -1;
4492 }
4493
4494 static int
4495 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
4496 {
4497         int  idx, ret;
4498         uint8_t filter_idx;
4499         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4500
4501         idx = i40e_get_vxlan_port_idx(pf, port);
4502
4503         /* Check if port already exists */
4504         if (idx >= 0) {
4505                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
4506                 return -EINVAL;
4507         }
4508
4509         /* Now check if there is space to add the new port */
4510         idx = i40e_get_vxlan_port_idx(pf, 0);
4511         if (idx < 0) {
4512                 PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
4513                         "not adding port %d", port);
4514                 return -ENOSPC;
4515         }
4516
4517         ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
4518                                         &filter_idx, NULL);
4519         if (ret < 0) {
4520                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
4521                 return -1;
4522         }
4523
4524         PMD_DRV_LOG(INFO, "Added %s port %d with AQ command with index %d",
4525                          port,  filter_index);
4526
4527         /* New port: add it and mark its index in the bitmap */
4528         pf->vxlan_ports[idx] = port;
4529         pf->vxlan_bitmap |= (1 << idx);
4530
4531         if (!(pf->flags & I40E_FLAG_VXLAN))
4532                 pf->flags |= I40E_FLAG_VXLAN;
4533
4534         return 0;
4535 }
4536
4537 static int
4538 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
4539 {
4540         int idx;
4541         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4542
4543         if (!(pf->flags & I40E_FLAG_VXLAN)) {
4544                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
4545                 return -EINVAL;
4546         }
4547
4548         idx = i40e_get_vxlan_port_idx(pf, port);
4549
4550         if (idx < 0) {
4551                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
4552                 return -EINVAL;
4553         }
4554
4555         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
4556                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
4557                 return -1;
4558         }
4559
4560         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
4561                         port, idx);
4562
4563         pf->vxlan_ports[idx] = 0;
4564         pf->vxlan_bitmap &= ~(1 << idx);
4565
4566         if (!pf->vxlan_bitmap)
4567                 pf->flags &= ~I40E_FLAG_VXLAN;
4568
4569         return 0;
4570 }
4571
4572 /* Add UDP tunneling port */
4573 static int
4574 i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
4575                         struct rte_eth_udp_tunnel *udp_tunnel)
4576 {
4577         int ret = 0;
4578         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4579
4580         if (udp_tunnel == NULL)
4581                 return -EINVAL;
4582
4583         switch (udp_tunnel->prot_type) {
4584         case RTE_TUNNEL_TYPE_VXLAN:
4585                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
4586                 break;
4587
4588         case RTE_TUNNEL_TYPE_GENEVE:
4589         case RTE_TUNNEL_TYPE_TEREDO:
4590                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
4591                 ret = -1;
4592                 break;
4593
4594         default:
4595                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
4596                 ret = -1;
4597                 break;
4598         }
4599
4600         return ret;
4601 }
4602
4603 /* Remove UDP tunneling port */
4604 static int
4605 i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
4606                         struct rte_eth_udp_tunnel *udp_tunnel)
4607 {
4608         int ret = 0;
4609         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4610
4611         if (udp_tunnel == NULL)
4612                 return -EINVAL;
4613
4614         switch (udp_tunnel->prot_type) {
4615         case RTE_TUNNEL_TYPE_VXLAN:
4616                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
4617                 break;
4618         case RTE_TUNNEL_TYPE_GENEVE:
4619         case RTE_TUNNEL_TYPE_TEREDO:
4620                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
4621                 ret = -1;
4622                 break;
4623         default:
4624                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
4625                 ret = -1;
4626                 break;
4627         }
4628
4629         return ret;
4630 }
4631
4632 /* Configure RSS */
4633 static int
4634 i40e_pf_config_rss(struct i40e_pf *pf)
4635 {
4636         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4637         struct rte_eth_rss_conf rss_conf;
4638         uint32_t i, lut = 0;
4639         uint16_t j, num = i40e_prev_power_of_2(pf->dev_data->nb_rx_queues);
4640
4641         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
4642                 if (j == num)
4643                         j = 0;
4644                 lut = (lut << 8) | (j & ((0x1 <<
4645                         hw->func_caps.rss_table_entry_width) - 1));
4646                 if ((i & 3) == 3)
4647                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
4648         }
4649
4650         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
4651         if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
4652                 i40e_pf_disable_rss(pf);
4653                 return 0;
4654         }
4655         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
4656                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
4657                 /* Calculate the default hash key */
4658                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4659                         rss_key_default[i] = (uint32_t)rte_rand();
4660                 rss_conf.rss_key = (uint8_t *)rss_key_default;
4661                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
4662                                                         sizeof(uint32_t);
4663         }
4664
4665         return i40e_hw_rss_hash_set(hw, &rss_conf);
4666 }
4667
4668 static int
4669 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
4670                         struct rte_eth_tunnel_filter_conf *filter)
4671 {
4672         if (pf == NULL || filter == NULL) {
4673                 PMD_DRV_LOG(ERR, "Invalid parameter");
4674                 return -EINVAL;
4675         }
4676
4677         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
4678                 PMD_DRV_LOG(ERR, "Invalid queue ID");
4679                 return -EINVAL;
4680         }
4681
4682         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
4683                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
4684                 return -EINVAL;
4685         }
4686
4687         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
4688                 (is_zero_ether_addr(filter->outer_mac))) {
4689                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
4690                 return -EINVAL;
4691         }
4692
4693         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
4694                 (is_zero_ether_addr(filter->inner_mac))) {
4695                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
4696                 return -EINVAL;
4697         }
4698
4699         return 0;
4700 }
4701
4702 static int
4703 i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
4704                         void *arg)
4705 {
4706         struct rte_eth_tunnel_filter_conf *filter;
4707         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4708         int ret = I40E_SUCCESS;
4709
4710         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
4711
4712         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
4713                 return I40E_ERR_PARAM;
4714
4715         switch (filter_op) {
4716         case RTE_ETH_FILTER_NOP:
4717                 if (!(pf->flags & I40E_FLAG_VXLAN))
4718                         ret = I40E_NOT_SUPPORTED;
4719         case RTE_ETH_FILTER_ADD:
4720                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
4721                 break;
4722         case RTE_ETH_FILTER_DELETE:
4723                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
4724                 break;
4725         default:
4726                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
4727                 ret = I40E_ERR_PARAM;
4728                 break;
4729         }
4730
4731         return ret;
4732 }
4733
4734 static int
4735 i40e_pf_config_mq_rx(struct i40e_pf *pf)
4736 {
4737         if (!pf->dev_data->sriov.active) {
4738                 switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
4739                 case ETH_MQ_RX_RSS:
4740                         i40e_pf_config_rss(pf);
4741                         break;
4742                 default:
4743                         i40e_pf_disable_rss(pf);
4744                         break;
4745                 }
4746         }
4747
4748         return 0;
4749 }
4750
4751 static int
4752 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
4753                      enum rte_filter_type filter_type,
4754                      enum rte_filter_op filter_op,
4755                      void *arg)
4756 {
4757         int ret = 0;
4758
4759         if (dev == NULL)
4760                 return -EINVAL;
4761
4762         switch (filter_type) {
4763         case RTE_ETH_FILTER_MACVLAN:
4764                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
4765                 break;
4766         case RTE_ETH_FILTER_TUNNEL:
4767                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
4768                 break;
4769         default:
4770                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4771                                                         filter_type);
4772                 ret = -EINVAL;
4773                 break;
4774         }
4775
4776         return ret;
4777 }